From 016b10f4ea76ba2189d476b4a5f7168e57a8d7ad Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 18 Jun 2014 01:59:28 -0700 Subject: dma: rcar-audmapp: enable .set_slave The current .set_slave callback did nothing, since it assumed that the src/dst addresses come from the platform settings. That is not a good match for DT probing. This patch implements the .set_slave callback to address this issue. Signed-off-by: Kuninori Morimoto Signed-off-by: Simon Horman --- drivers/dma/sh/rcar-audmapp.c | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c index 2de77289a2e9..858483bce719 100644 --- a/drivers/dma/sh/rcar-audmapp.c +++ b/drivers/dma/sh/rcar-audmapp.c @@ -47,6 +47,7 @@ struct audmapp_chan { struct shdma_chan shdma_chan; struct audmapp_slave_config *config; void __iomem *base; + dma_addr_t slave_addr; }; struct audmapp_device { @@ -56,7 +57,14 @@ struct audmapp_device { void __iomem *chan_reg; }; +struct audmapp_desc { + struct shdma_desc shdma_desc; + dma_addr_t src; + dma_addr_t dst; +}; + #define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan) +#define to_desc(sdesc) container_of(sdesc, struct audmapp_desc, shdma_desc) #define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \ struct audmapp_device, shdma_dev.dma_dev) @@ -90,19 +98,20 @@ static void audmapp_halt(struct shdma_chan *schan) } static void audmapp_start_xfer(struct shdma_chan *schan, - struct shdma_desc *sdecs) + struct shdma_desc *sdesc) { struct audmapp_chan *auchan = to_chan(schan); struct audmapp_device *audev = to_dev(auchan); + struct audmapp_desc *desc = to_desc(sdesc); struct audmapp_slave_config *cfg = auchan->config; struct device *dev = audev->dev; u32 chcr = cfg->chcr | PDMACHCR_DE; - dev_dbg(dev, "src/dst/chcr = %pad/%pad/%x\n", - &cfg->src, &cfg->dst, cfg->chcr); + dev_dbg(dev, "src/dst/chcr = %pad/%pad/%08x\n", + &desc->src, &desc->dst, chcr); - audmapp_write(auchan, cfg->src, PDMASAR); - audmapp_write(auchan, cfg->dst, PDMADAR); + audmapp_write(auchan, desc->src, PDMASAR); + audmapp_write(auchan, desc->dst, PDMADAR); audmapp_write(auchan, chcr, PDMACHCR); } @@ -137,14 +146,16 @@ static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, return 0; auchan->config = cfg; + auchan->slave_addr = slave_addr ? 
: cfg->dst; return 0; } static int audmapp_desc_setup(struct shdma_chan *schan, - struct shdma_desc *sdecs, + struct shdma_desc *sdesc, dma_addr_t src, dma_addr_t dst, size_t *len) { + struct audmapp_desc *desc = to_desc(sdesc); struct audmapp_chan *auchan = to_chan(schan); struct audmapp_slave_config *cfg = auchan->config; @@ -154,6 +165,9 @@ static int audmapp_desc_setup(struct shdma_chan *schan, if (*len > (size_t)AUDMAPP_LEN_MAX) *len = (size_t)AUDMAPP_LEN_MAX; + desc->src = src; + desc->dst = dst; + return 0; } @@ -164,7 +178,9 @@ static void audmapp_setup_xfer(struct shdma_chan *schan, static dma_addr_t audmapp_slave_addr(struct shdma_chan *schan) { - return 0; /* always fixed address */ + struct audmapp_chan *auchan = to_chan(schan); + + return auchan->slave_addr; } static bool audmapp_channel_busy(struct shdma_chan *schan) @@ -183,7 +199,7 @@ static bool audmapp_desc_completed(struct shdma_chan *schan, static struct shdma_desc *audmapp_embedded_desc(void *buf, int i) { - return &((struct shdma_desc *)buf)[i]; + return &((struct audmapp_desc *)buf)[i].shdma_desc; } static const struct shdma_ops audmapp_shdma_ops = { @@ -260,7 +276,7 @@ static int audmapp_probe(struct platform_device *pdev) sdev = &audev->shdma_dev; sdev->ops = &audmapp_shdma_ops; - sdev->desc_size = sizeof(struct shdma_desc); + sdev->desc_size = sizeof(struct audmapp_desc); dma_dev = &sdev->dma_dev; dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; -- cgit v1.2.3-58-ga151 From 75bfa5f60a368b1ccacaf71bfc0376b8d9eb2e9f Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 18 Jun 2014 01:59:35 -0700 Subject: dma: rcar-audmapp: don't keep audmapp_slave_config for each channel The current audmapp driver keeps an audmapp_slave_config for each channel, but the only necessary information is "chcr". The current style (= keeping audmapp_slave_config) is not a good match for DT support. Keep "chcr" instead of audmapp_slave_config. Signed-off-by: Kuninori Morimoto Signed-off-by: Simon Horman --- drivers/dma/sh/rcar-audmapp.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c index 858483bce719..dd0077519e3e 100644 --- a/drivers/dma/sh/rcar-audmapp.c +++ b/drivers/dma/sh/rcar-audmapp.c @@ -45,9 +45,9 @@ struct audmapp_chan { struct shdma_chan shdma_chan; - struct audmapp_slave_config *config; void __iomem *base; dma_addr_t slave_addr; + u32 chcr; }; struct audmapp_device { @@ -103,9 +103,8 @@ static void audmapp_start_xfer(struct shdma_chan *schan, struct audmapp_chan *auchan = to_chan(schan); struct audmapp_device *audev = to_dev(auchan); struct audmapp_desc *desc = to_desc(sdesc); - struct audmapp_slave_config *cfg = auchan->config; struct device *dev = audev->dev; - u32 chcr = cfg->chcr | PDMACHCR_DE; + u32 chcr = auchan->chcr | PDMACHCR_DE; dev_dbg(dev, "src/dst/chcr = %pad/%pad/%08x\n", &desc->src, &desc->dst, chcr); @@ -145,7 +144,7 @@ static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, if (try) return 0; - auchan->config = cfg; + auchan->chcr = cfg->chcr; auchan->slave_addr = slave_addr ? 
: cfg->dst; return 0; @@ -156,11 +155,6 @@ static int audmapp_desc_setup(struct shdma_chan *schan, dma_addr_t src, dma_addr_t dst, size_t *len) { struct audmapp_desc *desc = to_desc(sdesc); - struct audmapp_chan *auchan = to_chan(schan); - struct audmapp_slave_config *cfg = auchan->config; - - if (!cfg) - return -ENODEV; if (*len > (size_t)AUDMAPP_LEN_MAX) *len = (size_t)AUDMAPP_LEN_MAX; -- cgit v1.2.3-58-ga151 From 6b32fafee2bb5fcf0b3d3d04a9762d3a0212089e Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 20 Jun 2014 14:37:38 +0200 Subject: dmaengine: shdma: Add more register documentation Also add a few definitions that were missing. Signed-off-by: Geert Uytterhoeven Signed-off-by: Simon Horman --- arch/sh/include/asm/dma-register.h | 36 +++++++++++++++++++----------------- drivers/dma/sh/shdmac.c | 12 ++++++------ include/linux/sh_dma.h | 24 +++++++++++++----------- 3 files changed, 38 insertions(+), 34 deletions(-) (limited to 'drivers') diff --git a/arch/sh/include/asm/dma-register.h b/arch/sh/include/asm/dma-register.h index 51cd78feacff..c757b47e6b64 100644 --- a/arch/sh/include/asm/dma-register.h +++ b/arch/sh/include/asm/dma-register.h @@ -13,17 +13,17 @@ #ifndef DMA_REGISTER_H #define DMA_REGISTER_H -/* DMA register */ -#define SAR 0x00 -#define DAR 0x04 -#define TCR 0x08 -#define CHCR 0x0C -#define DMAOR 0x40 +/* DMA registers */ +#define SAR 0x00 /* Source Address Register */ +#define DAR 0x04 /* Destination Address Register */ +#define TCR 0x08 /* Transfer Count Register */ +#define CHCR 0x0C /* Channel Control Register */ +#define DMAOR 0x40 /* DMA Operation Register */ /* DMAOR definitions */ -#define DMAOR_AE 0x00000004 +#define DMAOR_AE 0x00000004 /* Address Error Flag */ #define DMAOR_NMIF 0x00000002 -#define DMAOR_DME 0x00000001 +#define DMAOR_DME 0x00000001 /* DMA Master Enable */ /* Definitions for the SuperH DMAC */ #define REQ_L 0x00000000 @@ -34,18 +34,20 @@ #define ACK_W 0x00020000 #define ACK_H 0x00000000 #define ACK_L 0x00010000 -#define DM_INC 0x00004000 -#define DM_DEC 0x00008000 -#define DM_FIX 0x0000c000 -#define SM_INC 0x00001000 -#define SM_DEC 0x00002000 -#define SM_FIX 0x00003000 +#define DM_INC 0x00004000 /* Destination addresses are incremented */ +#define DM_DEC 0x00008000 /* Destination addresses are decremented */ +#define DM_FIX 0x0000c000 /* Destination address is fixed */ +#define SM_INC 0x00001000 /* Source addresses are incremented */ +#define SM_DEC 0x00002000 /* Source addresses are decremented */ +#define SM_FIX 0x00003000 /* Source address is fixed */ #define RS_IN 0x00000200 #define RS_OUT 0x00000300 +#define RS_AUTO 0x00000400 /* Auto Request */ +#define RS_ERS 0x00000800 /* DMA extended resource selector */ #define TS_BLK 0x00000040 #define TM_BUR 0x00000020 -#define CHCR_DE 0x00000001 -#define CHCR_TE 0x00000002 -#define CHCR_IE 0x00000004 +#define CHCR_DE 0x00000001 /* DMA Enable */ +#define CHCR_TE 0x00000002 /* Transfer End Flag */ +#define CHCR_IE 0x00000004 /* Interrupt Enable */ #endif diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 146d5df926db..1a6f6595c6c1 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -38,12 +38,12 @@ #include "../dmaengine.h" #include "shdma.h" -/* DMA register */ -#define SAR 0x00 -#define DAR 0x04 -#define TCR 0x08 -#define CHCR 0x0C -#define DMAOR 0x40 +/* DMA registers */ +#define SAR 0x00 /* Source Address Register */ +#define DAR 0x04 /* Destination Address Register */ +#define TCR 0x08 /* Transfer Count Register */ +#define CHCR 0x0C /* Channel 
Control Register */ +#define DMAOR 0x40 /* DMA Operation Register */ #define TEND 0x18 /* USB-DMAC */ diff --git a/include/linux/sh_dma.h b/include/linux/sh_dma.h index b7b43b82231e..56b97eed28a4 100644 --- a/include/linux/sh_dma.h +++ b/include/linux/sh_dma.h @@ -95,19 +95,21 @@ struct sh_dmae_pdata { }; /* DMAOR definitions */ -#define DMAOR_AE 0x00000004 +#define DMAOR_AE 0x00000004 /* Address Error Flag */ #define DMAOR_NMIF 0x00000002 -#define DMAOR_DME 0x00000001 +#define DMAOR_DME 0x00000001 /* DMA Master Enable */ /* Definitions for the SuperH DMAC */ -#define DM_INC 0x00004000 -#define DM_DEC 0x00008000 -#define DM_FIX 0x0000c000 -#define SM_INC 0x00001000 -#define SM_DEC 0x00002000 -#define SM_FIX 0x00003000 -#define CHCR_DE 0x00000001 -#define CHCR_TE 0x00000002 -#define CHCR_IE 0x00000004 +#define DM_INC 0x00004000 /* Destination addresses are incremented */ +#define DM_DEC 0x00008000 /* Destination addresses are decremented */ +#define DM_FIX 0x0000c000 /* Destination address is fixed */ +#define SM_INC 0x00001000 /* Source addresses are incremented */ +#define SM_DEC 0x00002000 /* Source addresses are decremented */ +#define SM_FIX 0x00003000 /* Source address is fixed */ +#define RS_AUTO 0x00000400 /* Auto Request */ +#define RS_ERS 0x00000800 /* DMA extended resource selector */ +#define CHCR_DE 0x00000001 /* DMA Enable */ +#define CHCR_TE 0x00000002 /* Transfer End Flag */ +#define CHCR_IE 0x00000004 /* Interrupt Enable */ #endif -- cgit v1.2.3-58-ga151 From caf18c27ddb2fb8ae2a7591b908e7efb7484e459 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 18 Jun 2014 01:59:44 -0700 Subject: dma: rcar-audmapp: add DT support This patch adds DT support to the Audio DMAC peri peri driver. Signed-off-by: Kuninori Morimoto [horms+renesas@verge.net.au: Do not add trailing blank line to rcar-audmapp.txt] [horms+renesas@verge.net.au: squashed patch to add NULL terminator to audmapp_of_match] Signed-off-by: Simon Horman Signed-off-by: Kuninori Morimoto Signed-off-by: Simon Horman --- .../devicetree/bindings/dma/rcar-audmapp.txt | 29 +++++++++ drivers/dma/sh/rcar-audmapp.c | 72 +++++++++++++++++----- 2 files changed, 87 insertions(+), 14 deletions(-) create mode 100644 Documentation/devicetree/bindings/dma/rcar-audmapp.txt (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/dma/rcar-audmapp.txt b/Documentation/devicetree/bindings/dma/rcar-audmapp.txt new file mode 100644 index 000000000000..9f1d750d76de --- /dev/null +++ b/Documentation/devicetree/bindings/dma/rcar-audmapp.txt @@ -0,0 +1,29 @@ +* R-Car Audio DMAC peri peri Device Tree bindings + +Required properties: +- compatible: should be "renesas,rcar-audmapp" +- #dma-cells: should be <1>, see "dmas" property below + +Example: + audmapp: audio-dma-pp@0xec740000 { + compatible = "renesas,rcar-audmapp"; + #dma-cells = <1>; + + reg = <0 0xec740000 0 0x200>; + }; + + +* DMA client + +Required properties: +- dmas: a list of <[DMA multiplexer phandle] [SRS/DRS value]> pairs, + where SRS/DRS values are fixed handles, specified in the SoC + manual as the value that would be written into the PDMACHCR. 
+- dma-names: a list of DMA channel names, one per "dmas" entry + +Example: + + dmas = <&audmapp 0x2d00 + &audmapp 0x3700>; + dma-names = "src0_ssiu0", + "dvc0_ssiu0"; diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c index dd0077519e3e..dabbf0aba2e9 100644 --- a/drivers/dma/sh/rcar-audmapp.c +++ b/drivers/dma/sh/rcar-audmapp.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -63,6 +64,8 @@ struct audmapp_desc { dma_addr_t dst; }; +#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) + #define to_chan(chan) container_of(chan, struct audmapp_chan, shdma_chan) #define to_desc(sdesc) container_of(sdesc, struct audmapp_desc, shdma_desc) #define to_dev(chan) container_of(chan->shdma_chan.dma_chan.device, \ @@ -114,38 +117,50 @@ static void audmapp_start_xfer(struct shdma_chan *schan, audmapp_write(auchan, chcr, PDMACHCR); } -static struct audmapp_slave_config * -audmapp_find_slave(struct audmapp_chan *auchan, int slave_id) +static void audmapp_get_config(struct audmapp_chan *auchan, int slave_id, + u32 *chcr, dma_addr_t *dst) { struct audmapp_device *audev = to_dev(auchan); struct audmapp_pdata *pdata = audev->pdata; struct audmapp_slave_config *cfg; int i; + *chcr = 0; + *dst = 0; + + if (!pdata) { /* DT */ + *chcr = ((u32)slave_id) << 16; + auchan->shdma_chan.slave_id = (slave_id) >> 8; + return; + } + + /* non-DT */ + if (slave_id >= AUDMAPP_SLAVE_NUMBER) - return NULL; + return; for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) - if (cfg->slave_id == slave_id) - return cfg; - - return NULL; + if (cfg->slave_id == slave_id) { + *chcr = cfg->chcr; + *dst = cfg->dst; + break; + } } static int audmapp_set_slave(struct shdma_chan *schan, int slave_id, dma_addr_t slave_addr, bool try) { struct audmapp_chan *auchan = to_chan(schan); - struct audmapp_slave_config *cfg = - audmapp_find_slave(auchan, slave_id); + u32 chcr; + dma_addr_t dst; + + audmapp_get_config(auchan, slave_id, &chcr, &dst); - if (!cfg) - return -ENODEV; if (try) return 0; - auchan->chcr = cfg->chcr; - auchan->slave_addr = slave_addr ? : cfg->dst; + auchan->chcr = chcr; + auchan->slave_addr = slave_addr ? 
: dst; return 0; } @@ -244,16 +259,39 @@ static void audmapp_chan_remove(struct audmapp_device *audev) dma_dev->chancnt = 0; } +static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + dma_cap_mask_t mask; + struct dma_chan *chan; + u32 chcr = dma_spec->args[0]; + + if (dma_spec->args_count != 1) + return NULL; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + chan = dma_request_channel(mask, shdma_chan_filter, NULL); + if (chan) + to_shdma_chan(chan)->hw_req = chcr; + + return chan; +} + static int audmapp_probe(struct platform_device *pdev) { struct audmapp_pdata *pdata = pdev->dev.platform_data; + struct device_node *np = pdev->dev.of_node; struct audmapp_device *audev; struct shdma_dev *sdev; struct dma_device *dma_dev; struct resource *res; int err, i; - if (!pdata) + if (np) + of_dma_controller_register(np, audmapp_of_xlate, pdev); + else if (!pdata) return -ENODEV; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -315,12 +353,18 @@ static int audmapp_remove(struct platform_device *pdev) return 0; } +static const struct of_device_id audmapp_of_match[] = { + { .compatible = "renesas,rcar-audmapp", }, + {}, +}; + static struct platform_driver audmapp_driver = { .probe = audmapp_probe, .remove = audmapp_remove, .driver = { .owner = THIS_MODULE, .name = "rcar-audmapp-engine", + .of_match_table = audmapp_of_match, }, }; module_platform_driver(audmapp_driver); -- cgit v1.2.3-58-ga151 From 2baff5700b0832632f05c2ae93362fe3320cc735 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Wed, 21 May 2014 16:03:01 +0800 Subject: dmaengine: Freescale: use spin_lock_bh instead of spin_lock_irqsave spin_lock_irqsave() is a stronger locking mechanism than is required throughout the driver; the minimum locking required should be used instead. Using irqsave needlessly turns interrupts off and saves the IRQ context. This patch changes all instances of spin_lock_irqsave() to spin_lock_bh(). All manipulation of protected fields is done using tasklet context or weaker, which makes spin_lock_bh() the correct choice. 
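A minimal sketch of the conversion pattern (the helper functions below are hypothetical, not code taken from the driver):

/* before: IRQ-safe locking, stronger than the driver needs */
static void fsl_dma_ld_op_irqsave(struct fsldma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	/* manipulate chan->ld_pending / chan->ld_running ... */
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/* after: softirq-safe locking is sufficient, because the lock is only
 * taken from tasklet context or weaker, never from hard IRQ context */
static void fsl_dma_ld_op_bh(struct fsldma_chan *chan)
{
	spin_lock_bh(&chan->desc_lock);
	/* manipulate chan->ld_pending / chan->ld_running ... */
	spin_unlock_bh(&chan->desc_lock);
}
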
Signed-off-by: Hongbo Zhang Signed-off-by: Qiang Liu Signed-off-by: Vinod Koul --- drivers/dma/fsldma.c | 25 ++++++++++--------------- 1 file changed, 10 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index e0fec68aed25..b291e6c65053 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -396,10 +396,9 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) struct fsldma_chan *chan = to_fsl_chan(tx->chan); struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); struct fsl_desc_sw *child; - unsigned long flags; dma_cookie_t cookie = -EINVAL; - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_bh(&chan->desc_lock); /* * assign cookies to all of the software descriptors @@ -412,7 +411,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) /* put this transaction onto the tail of the pending queue */ append_ld_queue(chan, desc); - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_bh(&chan->desc_lock); return cookie; } @@ -617,13 +616,12 @@ static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan, static void fsl_dma_free_chan_resources(struct dma_chan *dchan) { struct fsldma_chan *chan = to_fsl_chan(dchan); - unsigned long flags; chan_dbg(chan, "free all channel resources\n"); - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_bh(&chan->desc_lock); fsldma_free_desc_list(chan, &chan->ld_pending); fsldma_free_desc_list(chan, &chan->ld_running); - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_bh(&chan->desc_lock); dma_pool_destroy(chan->desc_pool); chan->desc_pool = NULL; @@ -842,7 +840,6 @@ static int fsl_dma_device_control(struct dma_chan *dchan, { struct dma_slave_config *config; struct fsldma_chan *chan; - unsigned long flags; int size; if (!dchan) @@ -852,7 +849,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan, switch (cmd) { case DMA_TERMINATE_ALL: - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_bh(&chan->desc_lock); /* Halt the DMA engine */ dma_halt(chan); @@ -862,7 +859,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan, fsldma_free_desc_list(chan, &chan->ld_running); chan->idle = true; - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_bh(&chan->desc_lock); return 0; case DMA_SLAVE_CONFIG: @@ -904,11 +901,10 @@ static int fsl_dma_device_control(struct dma_chan *dchan, static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) { struct fsldma_chan *chan = to_fsl_chan(dchan); - unsigned long flags; - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_bh(&chan->desc_lock); fsl_chan_xfer_ld_queue(chan); - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_bh(&chan->desc_lock); } /** @@ -998,11 +994,10 @@ static void dma_do_tasklet(unsigned long data) struct fsldma_chan *chan = (struct fsldma_chan *)data; struct fsl_desc_sw *desc, *_desc; LIST_HEAD(ld_cleanup); - unsigned long flags; chan_dbg(chan, "tasklet entry\n"); - spin_lock_irqsave(&chan->desc_lock, flags); + spin_lock_bh(&chan->desc_lock); /* update the cookie if we have some descriptors to cleanup */ if (!list_empty(&chan->ld_running)) { @@ -1031,7 +1026,7 @@ static void dma_do_tasklet(unsigned long data) * ahead and free the descriptors below. 
*/ fsl_chan_xfer_ld_queue(chan); - spin_unlock_irqrestore(&chan->desc_lock, flags); + spin_unlock_bh(&chan->desc_lock); /* Run the callback for each descriptor, in order */ list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { -- cgit v1.2.3-58-ga151 From 14c6a3333c8e885604fc98768d8b9a32e08110ac Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Wed, 21 May 2014 16:03:02 +0800 Subject: dmaengine: Freescale: add suspend resume functions for DMA driver This patch adds suspend and resume functions for the Freescale DMA driver. Signed-off-by: Hongbo Zhang Signed-off-by: Vinod Koul --- drivers/dma/fsldma.c | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++++ drivers/dma/fsldma.h | 15 ++++++++++ 2 files changed, 92 insertions(+) (limited to 'drivers') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index b291e6c65053..465f16dd78e5 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -400,6 +400,14 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_bh(&chan->desc_lock); +#ifdef CONFIG_PM + if (unlikely(chan->pm_state != RUNNING)) { + chan_dbg(chan, "cannot submit due to suspend\n"); + spin_unlock_bh(&chan->desc_lock); + return -1; + } +#endif + /* * assign cookies to all of the software descriptors * that make up this transaction @@ -1221,6 +1229,9 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, INIT_LIST_HEAD(&chan->ld_pending); INIT_LIST_HEAD(&chan->ld_running); chan->idle = true; +#ifdef CONFIG_PM + chan->pm_state = RUNNING; +#endif chan->common.device = &fdev->common; dma_cookie_init(&chan->common); @@ -1360,6 +1371,69 @@ static int fsldma_of_remove(struct platform_device *op) return 0; } +#ifdef CONFIG_PM +static int fsldma_suspend_late(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fsldma_device *fdev = platform_get_drvdata(pdev); + struct fsldma_chan *chan; + int i; + + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (!chan) + continue; + + spin_lock_bh(&chan->desc_lock); + if (unlikely(!chan->idle)) + goto out; + chan->regs_save.mr = get_mr(chan); + chan->pm_state = SUSPENDED; + spin_unlock_bh(&chan->desc_lock); + } + return 0; + +out: + for (; i >= 0; i--) { + chan = fdev->chan[i]; + if (!chan) + continue; + chan->pm_state = RUNNING; + spin_unlock_bh(&chan->desc_lock); + } + return -EBUSY; +} + +static int fsldma_resume_early(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct fsldma_device *fdev = platform_get_drvdata(pdev); + struct fsldma_chan *chan; + u32 mode; + int i; + + for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { + chan = fdev->chan[i]; + if (!chan) + continue; + + spin_lock_bh(&chan->desc_lock); + mode = chan->regs_save.mr + & ~FSL_DMA_MR_CS & ~FSL_DMA_MR_CC & ~FSL_DMA_MR_CA; + set_mr(chan, mode); + chan->pm_state = RUNNING; + spin_unlock_bh(&chan->desc_lock); + } + + return 0; +} + +static const struct dev_pm_ops fsldma_pm_ops = { + .suspend_late = fsldma_suspend_late, + .resume_early = fsldma_resume_early, +}; +#endif + static const struct of_device_id fsldma_of_ids[] = { { .compatible = "fsl,elo3-dma", }, { .compatible = "fsl,eloplus-dma", }, @@ -1372,6 +1446,9 @@ static struct platform_driver fsldma_of_driver = { .name = "fsl-elo-dma", .owner = THIS_MODULE, .of_match_table = fsldma_of_ids, +#ifdef CONFIG_PM + .pm = &fsldma_pm_ops, +#endif }, .probe = fsldma_of_probe, .remove = fsldma_of_remove, diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 
d56e83599825..f2e0c4dcf901 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -134,6 +134,17 @@ struct fsldma_device { #define FSL_DMA_CHAN_PAUSE_EXT 0x00001000 #define FSL_DMA_CHAN_START_EXT 0x00002000 +#ifdef CONFIG_PM +struct fsldma_chan_regs_save { + u32 mr; +}; + +enum fsldma_pm_state { + RUNNING = 0, + SUSPENDED, +}; +#endif + struct fsldma_chan { char name[8]; /* Channel name */ struct fsldma_chan_regs __iomem *regs; @@ -148,6 +159,10 @@ struct fsldma_chan { struct tasklet_struct tasklet; u32 feature; bool idle; /* DMA controller is idle */ +#ifdef CONFIG_PM + struct fsldma_chan_regs_save regs_save; + enum fsldma_pm_state pm_state; +#endif void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable); -- cgit v1.2.3-58-ga151 From 43452fadd614b62b84e950838cb7d2419f3aafb1 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Wed, 21 May 2014 16:03:03 +0800 Subject: dmaengine: Freescale: change descriptor release process for supporting async_tx Fix a potential risk when the NET_DMA and ASYNC_TX configs are both enabled. The current release process of DMA descriptors lacks support for async_tx: all descriptors are released whether they are acked or not acked by async_tx, so there is a potential race condition when the DMA engine is used by other clients (e.g. when NET_DMA is enabled to offload TCP). In our case, a race condition is raised when both talitos and the dmaengine are used to offload xor, because the napi scheduler syncs all pending requests in the dma channels; this affects the processing of raid operations, since ack_tx is not checked in fsl dma. A not-yet-acked descriptor that has just been submitted is freed; as a dependent tx, this freed descriptor triggers BUG_ON(async_tx_test_ack(depend_tx)) in async_tx_submit(). TASK = ee1a94a0[1390] 'md0_raid5' THREAD: ecf40000 CPU: 0 GPR00: 00000001 ecf41ca0 ee1a94a0 0000003f 00000001 c00593e4 00000000 00000001 GPR08: 00000000 a7a7a7a7 00000001 00000002 42028042 100a38d4 ed576d98 00000000 GPR16: ed5a11b0 00000000 2b162000 00000200 00000000 2d555000 ed3015e8 c15a7aa0 GPR24: 00000000 c155fc40 00000000 ecb63220 ecf41d28 ef640bb0 ef640c30 ecf41ca0 NIP [c02b048c] async_tx_submit+0x6c/0x2b4 LR [c02b068c] async_tx_submit+0x26c/0x2b4 Call Trace: [ecf41ca0] [c02b068c] async_tx_submit+0x26c/0x2b4 (unreliable) [ecf41cd0] [c02b0a4c] async_memcpy+0x240/0x25c [ecf41d20] [c0421064] async_copy_data+0xa0/0x17c [ecf41d70] [c0421cf4] __raid_run_ops+0x874/0xe10 [ecf41df0] [c0426ee4] handle_stripe+0x820/0x25e8 [ecf41e90] [c0429080] raid5d+0x3d4/0x5b4 [ecf41f40] [c04329b8] md_thread+0x138/0x16c [ecf41f90] [c008277c] kthread+0x8c/0x90 [ecf41ff0] [c0011630] kernel_thread+0x4c/0x68 Another modification in this patch is the change to completed descriptors: there is a potential risk caused by an exception interrupt, because all descriptors in the ld_running list are deemed completed when an interrupt is raised. This works fine under normal conditions, but if an exception occurs it does not work as expected. Hardware should not depend on the s/w list; the right way is to read the current descriptor address register to find the last completed descriptor. If an interrupt is raised by an error, the descriptors in ld_running should not be deemed finished, or these unfinished descriptors in ld_running will be released wrongly. A simple way to reproduce: enable dmatest first, then insert some bad descriptors which can trigger Programming Error interrupts before the good descriptors. 
Last, the good descriptors will be freed before they are processed because of the exception interrupt. Note: the bad descriptors are only for simulating an exception interrupt. This case can illustrate the potential risk in the current fsl-dma driver very well. Signed-off-by: Hongbo Zhang Signed-off-by: Qiang Liu Signed-off-by: Ira W. Snyder Signed-off-by: Vinod Koul --- drivers/dma/fsldma.c | 197 +++++++++++++++++++++++++++++++++++++-------------- drivers/dma/fsldma.h | 17 ++++- 2 files changed, 159 insertions(+), 55 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 465f16dd78e5..d5d6885ab341 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -465,6 +465,88 @@ static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) return desc; } +/** + * fsldma_clean_completed_descriptor - free all descriptors which + * has been completed and acked + * @chan: Freescale DMA channel + * + * This function is used on all completed and acked descriptors. + * All descriptors should only be freed in this function. + */ +static void fsldma_clean_completed_descriptor(struct fsldma_chan *chan) +{ + struct fsl_desc_sw *desc, *_desc; + + /* Run the callback for each descriptor, in order */ + list_for_each_entry_safe(desc, _desc, &chan->ld_completed, node) + if (async_tx_test_ack(&desc->async_tx)) + fsl_dma_free_descriptor(chan, desc); +} + +/** + * fsldma_run_tx_complete_actions - cleanup a single link descriptor + * @chan: Freescale DMA channel + * @desc: descriptor to cleanup and free + * @cookie: Freescale DMA transaction identifier + * + * This function is used on a descriptor which has been executed by the DMA + * controller. It will run any callbacks, submit any dependencies. + */ +static dma_cookie_t fsldma_run_tx_complete_actions(struct fsldma_chan *chan, + struct fsl_desc_sw *desc, dma_cookie_t cookie) +{ + struct dma_async_tx_descriptor *txd = &desc->async_tx; + dma_cookie_t ret = cookie; + + BUG_ON(txd->cookie < 0); + + if (txd->cookie > 0) { + ret = txd->cookie; + + /* Run the link descriptor callback function */ + if (txd->callback) { + chan_dbg(chan, "LD %p callback\n", desc); + txd->callback(txd->callback_param); + } + } + + /* Run any dependencies */ + dma_run_dependencies(txd); + + return ret; +} + +/** + * fsldma_clean_running_descriptor - move the completed descriptor from + * ld_running to ld_completed + * @chan: Freescale DMA channel + * @desc: the descriptor which is completed + * + * Free the descriptor directly if acked by async_tx api, or move it to + * queue ld_completed. + */ +static void fsldma_clean_running_descriptor(struct fsldma_chan *chan, + struct fsl_desc_sw *desc) +{ + /* Remove from the list of transactions */ + list_del(&desc->node); + + /* + * the client is allowed to attach dependent operations + * until 'ack' is set + */ + if (!async_tx_test_ack(&desc->async_tx)) { + /* + * Move this descriptor to the list of descriptors which is + * completed, but still awaiting the 'ack' bit to be set. 
+ */ + list_add_tail(&desc->node, &chan->ld_completed); + return; + } + + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); +} + /** * fsl_chan_xfer_ld_queue - transfer any pending transactions * @chan : Freescale DMA channel @@ -533,31 +615,58 @@ static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) } /** - * fsldma_cleanup_descriptor - cleanup and free a single link descriptor + * fsldma_cleanup_descriptors - cleanup link descriptors which are completed + * and move them to ld_completed to free until flag 'ack' is set * @chan: Freescale DMA channel - * @desc: descriptor to cleanup and free * - * This function is used on a descriptor which has been executed by the DMA - * controller. It will run any callbacks, submit any dependencies, and then - * free the descriptor. + * This function is used on descriptors which have been executed by the DMA + * controller. It will run any callbacks, submit any dependencies, then + * free these descriptors if flag 'ack' is set. */ -static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, - struct fsl_desc_sw *desc) +static void fsldma_cleanup_descriptors(struct fsldma_chan *chan) { - struct dma_async_tx_descriptor *txd = &desc->async_tx; + struct fsl_desc_sw *desc, *_desc; + dma_cookie_t cookie = 0; + dma_addr_t curr_phys = get_cdar(chan); + int seen_current = 0; - /* Run the link descriptor callback function */ - if (txd->callback) { - chan_dbg(chan, "LD %p callback\n", desc); - txd->callback(txd->callback_param); + fsldma_clean_completed_descriptor(chan); + + /* Run the callback for each descriptor, in order */ + list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { + /* + * do not advance past the current descriptor loaded into the + * hardware channel, subsequent descriptors are either in + * process or have not been submitted + */ + if (seen_current) + break; + + /* + * stop the search if we reach the current descriptor and the + * channel is busy + */ + if (desc->async_tx.phys == curr_phys) { + seen_current = 1; + if (!dma_is_idle(chan)) + break; + } + + cookie = fsldma_run_tx_complete_actions(chan, desc, cookie); + + fsldma_clean_running_descriptor(chan, desc); } - /* Run any dependencies */ - dma_run_dependencies(txd); + /* + * Start any pending transactions automatically + * + * In the ideal case, we keep the DMA controller busy while we go + * ahead and free the descriptors below. 
+ */ + fsl_chan_xfer_ld_queue(chan); - dma_descriptor_unmap(txd); - chan_dbg(chan, "LD %p free\n", desc); - dma_pool_free(chan->desc_pool, desc, txd->phys); + if (cookie > 0) + chan->common.completed_cookie = cookie; } /** @@ -627,8 +736,10 @@ static void fsl_dma_free_chan_resources(struct dma_chan *dchan) chan_dbg(chan, "free all channel resources\n"); spin_lock_bh(&chan->desc_lock); + fsldma_cleanup_descriptors(chan); fsldma_free_desc_list(chan, &chan->ld_pending); fsldma_free_desc_list(chan, &chan->ld_running); + fsldma_free_desc_list(chan, &chan->ld_completed); spin_unlock_bh(&chan->desc_lock); dma_pool_destroy(chan->desc_pool); @@ -865,6 +976,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan, /* Remove and free all of the descriptors in the LD queue */ fsldma_free_desc_list(chan, &chan->ld_pending); fsldma_free_desc_list(chan, &chan->ld_running); + fsldma_free_desc_list(chan, &chan->ld_completed); chan->idle = true; spin_unlock_bh(&chan->desc_lock); @@ -923,6 +1035,17 @@ static enum dma_status fsl_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, struct dma_tx_state *txstate) { + struct fsldma_chan *chan = to_fsl_chan(dchan); + enum dma_status ret; + + ret = dma_cookie_status(dchan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + spin_lock_bh(&chan->desc_lock); + fsldma_cleanup_descriptors(chan); + spin_unlock_bh(&chan->desc_lock); + return dma_cookie_status(dchan, cookie, txstate); } @@ -1000,51 +1123,18 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) static void dma_do_tasklet(unsigned long data) { struct fsldma_chan *chan = (struct fsldma_chan *)data; - struct fsl_desc_sw *desc, *_desc; - LIST_HEAD(ld_cleanup); chan_dbg(chan, "tasklet entry\n"); spin_lock_bh(&chan->desc_lock); - /* update the cookie if we have some descriptors to cleanup */ - if (!list_empty(&chan->ld_running)) { - dma_cookie_t cookie; - - desc = to_fsl_desc(chan->ld_running.prev); - cookie = desc->async_tx.cookie; - dma_cookie_complete(&desc->async_tx); - - chan_dbg(chan, "completed_cookie=%d\n", cookie); - } - - /* - * move the descriptors to a temporary list so we can drop the lock - * during the entire cleanup operation - */ - list_splice_tail_init(&chan->ld_running, &ld_cleanup); - /* the hardware is now idle and ready for more */ chan->idle = true; - /* - * Start any pending transactions automatically - * - * In the ideal case, we keep the DMA controller busy while we go - * ahead and free the descriptors below. 
- */ - fsl_chan_xfer_ld_queue(chan); - spin_unlock_bh(&chan->desc_lock); - - /* Run the callback for each descriptor, in order */ - list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { - - /* Remove from the list of transactions */ - list_del(&desc->node); + /* Run all cleanup for descriptors which have been completed */ + fsldma_cleanup_descriptors(chan); - /* Run all cleanup for this descriptor */ - fsldma_cleanup_descriptor(chan, desc); - } + spin_unlock_bh(&chan->desc_lock); chan_dbg(chan, "tasklet exit\n"); } @@ -1228,6 +1318,7 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev, spin_lock_init(&chan->desc_lock); INIT_LIST_HEAD(&chan->ld_pending); INIT_LIST_HEAD(&chan->ld_running); + INIT_LIST_HEAD(&chan->ld_completed); chan->idle = true; #ifdef CONFIG_PM chan->pm_state = RUNNING; diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index f2e0c4dcf901..239c20c84382 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -149,8 +149,21 @@ struct fsldma_chan { char name[8]; /* Channel name */ struct fsldma_chan_regs __iomem *regs; spinlock_t desc_lock; /* Descriptor operation lock */ - struct list_head ld_pending; /* Link descriptors queue */ - struct list_head ld_running; /* Link descriptors queue */ + /* + * Descriptors which are queued to run, but have not yet been + * submitted to the hardware for execution + */ + struct list_head ld_pending; + /* + * Descriptors which are currently being executed by the hardware + */ + struct list_head ld_running; + /* + * Descriptors which have finished execution by the hardware. These + * descriptors have already had their cleanup actions run. They are + * waiting for the ACK bit to be set by the async_tx API. + */ + struct list_head ld_completed; /* Link descriptors queue */ struct dma_chan common; /* DMA common channel */ struct dma_pool *desc_pool; /* Descriptors pool */ struct device *dev; /* Channel device */ -- cgit v1.2.3-58-ga151 From 89751d0a2a05197e0823eb0a363efd034175a76f Mon Sep 17 00:00:00 2001 From: Andy Gross Date: Fri, 30 May 2014 15:49:50 -0500 Subject: dmaengine: qcom_bam_dma: Add descriptor flags This patch adds support for end of transaction (EOT) and notify when done (NWD) hardware descriptor flags. The EOT flag requests that the peripheral assert an end of transaction interrupt when that descriptor is complete. It also results in special signaling protocol that is used between the attached peripheral and the core using the DMA controller. Clients will specify DMA_PREP_INTERRUPT to enable this flag. The NWD flag requests that the peripheral wait until the data has been fully processed by the peripheral before moving on to the next descriptor. Clients will specify DMA_PREP_FENCE to enable this flag. 
Signed-off-by: Andy Gross Signed-off-by: Vinod Koul --- drivers/dma/qcom_bam_dma.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c index 82c923146e49..7a4bbb0f80a5 100644 --- a/drivers/dma/qcom_bam_dma.c +++ b/drivers/dma/qcom_bam_dma.c @@ -61,12 +61,17 @@ struct bam_desc_hw { #define DESC_FLAG_INT BIT(15) #define DESC_FLAG_EOT BIT(14) #define DESC_FLAG_EOB BIT(13) +#define DESC_FLAG_NWD BIT(12) struct bam_async_desc { struct virt_dma_desc vd; u32 num_desc; u32 xfer_len; + + /* transaction flags, EOT|EOB|NWD */ + u16 flags; + struct bam_desc_hw *curr_desc; enum dma_transfer_direction dir; @@ -490,6 +495,14 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan, if (!async_desc) goto err_out; + if (flags & DMA_PREP_FENCE) + async_desc->flags |= DESC_FLAG_NWD; + + if (flags & DMA_PREP_INTERRUPT) + async_desc->flags |= DESC_FLAG_EOT; + else + async_desc->flags |= DESC_FLAG_INT; + async_desc->num_desc = num_alloc; async_desc->curr_desc = async_desc->desc; async_desc->dir = direction; @@ -793,8 +806,11 @@ static void bam_start_dma(struct bam_chan *bchan) else async_desc->xfer_len = async_desc->num_desc; - /* set INT on last descriptor */ - desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT; + /* set any special flags on the last descriptor */ + if (async_desc->num_desc == async_desc->xfer_len) + desc[async_desc->xfer_len - 1].flags = async_desc->flags; + else + desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT; if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { u32 partial = MAX_DESCRIPTORS - bchan->tail; -- cgit v1.2.3-58-ga151 From 9f92d2232d5fabe18741044d8004160f8cf80e9e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sun, 15 Jun 2014 13:37:35 -0700 Subject: dmaengine: Use dma_zalloc_coherent Use the zeroing function instead of dma_alloc_coherent & memset(,0,) Signed-off-by: Joe Perches Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 5 ++--- drivers/dma/mxs-dma.c | 8 +++----- 2 files changed, 5 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 14867e3ac8ff..bbcf783707f1 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -911,14 +911,13 @@ static int sdma_request_channel(struct sdma_channel *sdmac) int channel = sdmac->channel; int ret = -EBUSY; - sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL); + sdmac->bd = dma_zalloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, + GFP_KERNEL); if (!sdmac->bd) { ret = -ENOMEM; goto out; } - memset(sdmac->bd, 0, PAGE_SIZE); - sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index ead491346da7..dc1dba78e529 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -413,16 +413,14 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int ret; - mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, - CCW_BLOCK_SIZE, &mxs_chan->ccw_phys, - GFP_KERNEL); + mxs_chan->ccw = dma_zalloc_coherent(mxs_dma->dma_device.dev, + CCW_BLOCK_SIZE, + &mxs_chan->ccw_phys, GFP_KERNEL); if (!mxs_chan->ccw) { ret = -ENOMEM; goto err_alloc; } - memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE); - if (mxs_chan->chan_irq != NO_IRQ) { ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, 0, "mxs-dma", 
mxs_dma); -- cgit v1.2.3-58-ga151 From 0cdbee33ea42bfd5f61f824e258a1a0944dd2a27 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 20 Jun 2014 14:37:41 +0200 Subject: dmaengine: shdma: Use defines instead of hardcoded numbers Signed-off-by: Geert Uytterhoeven Signed-off-by: Simon Horman --- drivers/dma/sh/shdma-arm.h | 4 ++-- drivers/dma/sh/shdmac.c | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/sh/shdma-arm.h b/drivers/dma/sh/shdma-arm.h index a2b8258426c9..a1b0ef45d6a2 100644 --- a/drivers/dma/sh/shdma-arm.h +++ b/drivers/dma/sh/shdma-arm.h @@ -45,7 +45,7 @@ enum { ((((i) & TS_LOW_BIT) << TS_LOW_SHIFT) |\ (((i) & TS_HI_BIT) << TS_HI_SHIFT)) -#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL((xmit_sz))) -#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL((xmit_sz))) +#define CHCR_TX(xmit_sz) (DM_FIX | SM_INC | RS_ERS | TS_INDEX2VAL((xmit_sz))) +#define CHCR_RX(xmit_sz) (DM_INC | SM_FIX | RS_ERS | TS_INDEX2VAL((xmit_sz))) #endif diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 1a6f6595c6c1..58eb85770eba 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -239,9 +239,8 @@ static void dmae_init(struct sh_dmae_chan *sh_chan) { /* * Default configuration for dual address memory-memory transfer. - * 0x400 represents auto-request. */ - u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, + u32 chcr = DM_INC | SM_INC | RS_AUTO | log2size_to_chcr(sh_chan, LOG2_DEFAULT_XFER_SIZE); sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); chcr_write(sh_chan, chcr); -- cgit v1.2.3-58-ga151 From 653e67f7e5e008258ff65c2067460cc9e8ee8f94 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 14 Jul 2014 22:47:26 +0200 Subject: dmaengine: inherit debug settings from the subsystem for subdirectories To be able to see debug messages during boot, enable the debug settings from Kconfig also for drivers in subdirectories. Signed-off-by: Wolfram Sang Signed-off-by: Vinod Koul --- drivers/dma/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index c779e1eb2db2..aca5eb577d44 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -1,5 +1,5 @@ -ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG -ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG +subdir-ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG +subdir-ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o -- cgit v1.2.3-58-ga151 From 585a9d0b43d109ad76c0e43eae0fbe5ec3196246 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:18 +0200 Subject: dmaengine: pl330: Use dma_transfer_direction instead of custom pl330_reqtype The pl330 driver has the custom pl330_reqtype enum which has the same possible settings as the generic dma_transfer_direction enum. Switching over to the generic enum internally makes it possible to directly initialize it from the transfer request direction. 
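Condensed from the diff that follows (a sketch, with the surrounding prep-function context omitted), the simplification amounts to:

	/* before: translate the generic direction into a private enum */
	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->req.rqtype = MEMTODEV;
		break;
	case DMA_DEV_TO_MEM:
		desc->req.rqtype = DEVTOMEM;
		break;
	default:
		break;
	}

	/* after: rqtype is an enum dma_transfer_direction, so the request
	 * direction can be stored as-is */
	desc->req.rqtype = direction;
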
Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 73fa9b7a10ab..3aff676af853 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -63,13 +63,6 @@ enum pl330_byteswap { SWAP_16, }; -enum pl330_reqtype { - MEMTOMEM, - MEMTODEV, - DEVTOMEM, - DEVTODEV, -}; - /* Register and Bit field Definitions */ #define DS 0x0 #define DS_ST_STOP 0x0 @@ -378,7 +371,7 @@ enum pl330_op_err { /* A request defining Scatter-Gather List ending with NULL xfer. */ struct pl330_req { - enum pl330_reqtype rqtype; + enum dma_transfer_direction rqtype; /* Index of peripheral for the xfer. */ unsigned peri:5; /* Unique token for this xfer, set by the client. */ @@ -1296,13 +1289,13 @@ static int _bursts(unsigned dry_run, u8 buf[], int off = 0; switch (pxs->r->rqtype) { - case MEMTODEV: + case DMA_MEM_TO_DEV: off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc); break; - case DEVTOMEM: + case DMA_DEV_TO_MEM: off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc); break; - case MEMTOMEM: + case DMA_MEM_TO_MEM: off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); break; default: @@ -1543,7 +1536,7 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r) } /* If request for non-existing peripheral */ - if (r->rqtype != MEMTOMEM && r->peri >= pi->pcfg.num_peri) { + if (r->rqtype != DMA_MEM_TO_MEM && r->peri >= pi->pcfg.num_peri) { dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid peripheral(%u)!\n", __func__, __LINE__, r->peri); @@ -2698,14 +2691,12 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( case DMA_MEM_TO_DEV: desc->rqcfg.src_inc = 1; desc->rqcfg.dst_inc = 0; - desc->req.rqtype = MEMTODEV; src = dma_addr; dst = pch->fifo_addr; break; case DMA_DEV_TO_MEM: desc->rqcfg.src_inc = 0; desc->rqcfg.dst_inc = 1; - desc->req.rqtype = DEVTOMEM; src = pch->fifo_addr; dst = dma_addr; break; @@ -2713,6 +2704,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( break; } + desc->req.rqtype = direction; desc->rqcfg.brst_size = pch->burst_sz; desc->rqcfg.brst_len = 1; fill_px(&desc->px, dst, src, period_len); @@ -2754,7 +2746,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, desc->rqcfg.src_inc = 1; desc->rqcfg.dst_inc = 1; - desc->req.rqtype = MEMTOMEM; + desc->req.rqtype = DMA_MEM_TO_MEM; /* Select max possible burst size */ burst = pi->pcfg.data_bus_width / 8; @@ -2838,19 +2830,18 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (direction == DMA_MEM_TO_DEV) { desc->rqcfg.src_inc = 1; desc->rqcfg.dst_inc = 0; - desc->req.rqtype = MEMTODEV; fill_px(&desc->px, addr, sg_dma_address(sg), sg_dma_len(sg)); } else { desc->rqcfg.src_inc = 0; desc->rqcfg.dst_inc = 1; - desc->req.rqtype = DEVTOMEM; fill_px(&desc->px, sg_dma_address(sg), addr, sg_dma_len(sg)); } desc->rqcfg.brst_size = pch->burst_sz; desc->rqcfg.brst_len = 1; + desc->req.rqtype = direction; } /* Return the last desc in the chain */ -- cgit v1.2.3-58-ga151 From f0564c7ecbc07a1290dabc8720f9dc03f22fa563 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:19 +0200 Subject: dmaengine: pl330: Remove duplicated cachecontrol enum The settings for destination and source cache control are exactly the same. This patch removes the duplicated enum and uses the same for both destination and source cache control. 
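In condensed form (a sketch lifted from the _init_desc() hunk in the diff below, context omitted):

	/* before: two identical enums, one per direction */
	desc->rqcfg.scctl = SCCTRL0;	/* enum pl330_srccachectrl */
	desc->rqcfg.dcctl = DCCTRL0;	/* enum pl330_dstcachectrl */

	/* after: one shared enum pl330_cachectrl covers both fields */
	desc->rqcfg.scctl = CCTRL0;
	desc->rqcfg.dcctl = CCTRL0;
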
Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 45 +++++++++++++++++---------------------------- 1 file changed, 17 insertions(+), 28 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 3aff676af853..22739c436bc3 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -33,26 +33,15 @@ #define PL330_MAX_IRQS 32 #define PL330_MAX_PERI 32 -enum pl330_srccachectrl { - SCCTRL0, /* Noncacheable and nonbufferable */ - SCCTRL1, /* Bufferable only */ - SCCTRL2, /* Cacheable, but do not allocate */ - SCCTRL3, /* Cacheable and bufferable, but do not allocate */ - SINVALID1, - SINVALID2, - SCCTRL6, /* Cacheable write-through, allocate on reads only */ - SCCTRL7, /* Cacheable write-back, allocate on reads only */ -}; - -enum pl330_dstcachectrl { - DCCTRL0, /* Noncacheable and nonbufferable */ - DCCTRL1, /* Bufferable only */ - DCCTRL2, /* Cacheable, but do not allocate */ - DCCTRL3, /* Cacheable and bufferable, but do not allocate */ - DINVALID1, /* AWCACHE = 0x1000 */ - DINVALID2, - DCCTRL6, /* Cacheable write-through, allocate on writes only */ - DCCTRL7, /* Cacheable write-back, allocate on writes only */ +enum pl330_cachectrl { + CCTRL0, /* Noncacheable and nonbufferable */ + CCTRL1, /* Bufferable only */ + CCTRL2, /* Cacheable, but do not allocate */ + CCTRL3, /* Cacheable and bufferable, but do not allocate */ + INVALID1, /* AWCACHE = 0x1000 */ + INVALID2, + CCTRL6, /* Cacheable write-through, allocate on writes only */ + CCTRL7, /* Cacheable write-back, allocate on writes only */ }; enum pl330_byteswap { @@ -337,8 +326,8 @@ struct pl330_reqcfg { unsigned brst_len:5; unsigned brst_size:3; /* in power of 2 */ - enum pl330_dstcachectrl dcctl; - enum pl330_srccachectrl scctl; + enum pl330_cachectrl dcctl; + enum pl330_cachectrl scctl; enum pl330_byteswap swap; struct pl330_config *pcfg; }; @@ -1490,14 +1479,14 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) static inline bool _is_valid(u32 ccr) { - enum pl330_dstcachectrl dcctl; - enum pl330_srccachectrl scctl; + enum pl330_cachectrl dcctl; + enum pl330_cachectrl scctl; dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK; scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK; - if (dcctl == DINVALID1 || dcctl == DINVALID2 - || scctl == SINVALID1 || scctl == SINVALID2) + if (dcctl == INVALID1 || dcctl == INVALID2 + || scctl == INVALID1 || scctl == INVALID2) return false; else return true; @@ -2485,8 +2474,8 @@ static inline void _init_desc(struct dma_pl330_desc *desc) desc->req.x = &desc->px; desc->req.token = desc; desc->rqcfg.swap = SWAP_NO; - desc->rqcfg.scctl = SCCTRL0; - desc->rqcfg.dcctl = DCCTRL0; + desc->rqcfg.scctl = CCTRL0; + desc->rqcfg.dcctl = CCTRL0; desc->req.cfg = &desc->rqcfg; desc->req.xfer_cb = dma_pl330_rqcb; desc->txd.tx_submit = pl330_tx_submit; -- cgit v1.2.3-58-ga151 From fa01ef38d60096aa689bcc2659aedd4a9ae83cd1 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:20 +0200 Subject: dmaengine: pl330: Remove unused pl330_chanstatus struct The pl330_chanstatus struct is completely unused, so remove it. 
Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 29 ----------------------------- 1 file changed, 29 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 22739c436bc3..9121edeb2727 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -375,35 +375,6 @@ struct pl330_req { struct list_head rqd; }; -/* - * To know the status of the channel and DMAC, the client - * provides a pointer to this structure. The PL330 core - * fills it with current information. - */ -struct pl330_chanstatus { - /* - * If the DMAC engine halted due to some error, - * the client should remove-add DMAC. - */ - bool dmac_halted; - /* - * If channel is halted due to some error, - * the client should ABORT/FLUSH and START the channel. - */ - bool faulting; - /* Location of last load */ - u32 src_addr; - /* Location of last store */ - u32 dst_addr; - /* - * Pointer to the currently active req, NULL if channel is - * inactive, even though the requests may be present. - */ - struct pl330_req *top_req; - /* Pointer to req waiting second in the queue if any. */ - struct pl330_req *wait_req; -}; - enum pl330_chan_op { /* Start the channel */ PL330_OP_START, -- cgit v1.2.3-58-ga151 From 7ae342fd6fd2bc1f694cf2c8fcbbeb2d2a017658 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:21 +0200 Subject: dmaengine: pl330: Remove unused dmac_reset callback The dmac_reset() callback of the pl330_info struct is always set to NULL, so remove it. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 12 ------------ 1 file changed, 12 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 9121edeb2727..f23c5f0b3dac 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -296,11 +296,6 @@ struct pl330_info { void *pl330_data; /* Populated by the PL330 core driver during pl330_add */ struct pl330_config pcfg; - /* - * If the DMAC has some reset mechanism, then the - * client may want to provide pointer to the method. - */ - void (*dmac_reset)(struct pl330_info *pi); }; /** @@ -2024,13 +2019,6 @@ static int pl330_add(struct pl330_info *pi) if (pi->pl330_data) return -EINVAL; - /* - * If the SoC can perform reset on the DMAC, then do it - * before reading its configuration. - */ - if (pi->dmac_reset) - pi->dmac_reset(pi); - regs = pi->base; /* Check if we can handle this DMAC */ -- cgit v1.2.3-58-ga151 From c9392d80ae0762425314fbf1d9210f68da400e12 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:22 +0200 Subject: dmaengine: pl330: Remove unused client_data field from pl330_info The field is completely unused, remove it. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f23c5f0b3dac..6b61c578c868 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -290,8 +290,6 @@ struct pl330_info { unsigned mcbufsz; /* ioremap'ed address of PL330 registers. */ void __iomem *base; - /* Client can freely use it. */ - void *client_data; /* PL330 core data, Client must not touch it. 
*/ void *pl330_data; /* Populated by the PL330 core driver during pl330_add */ struct pl330_config pcfg; -- cgit v1.2.3-58-ga151 From d5cef121ffa6a2525107e4c58e126ea885911ff7 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:23 +0200 Subject: dmaengine: pl330: Remove unused next field from pl330_xfer struct The next field is always NULL, so we can remove it. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6b61c578c868..80a17a84ab75 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -334,11 +334,6 @@ struct pl330_xfer { u32 dst_addr; /* Size to xfer */ u32 bytes; - /* - * Pointer to next xfer in the list. - * The last xfer in the req must point to NULL. - */ - struct pl330_xfer *next; }; /* The xfer callbacks are made with one of these arguments. */ @@ -1390,16 +1385,12 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd, off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr); x = pxs->r->x; - do { - /* Error if xfer length is not aligned at burst size */ - if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) - return -EINVAL; - - pxs->x = x; - off += _setup_xfer(dry_run, &buf[off], pxs); + /* Error if xfer length is not aligned at burst size */ + if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) + return -EINVAL; - x = x->next; - } while (x); + pxs->x = x; + off += _setup_xfer(dry_run, &buf[off], pxs); /* DMASEV peripheral/event */ off += _emit_SEV(dry_run, &buf[off], thrd->ev); @@ -2531,7 +2522,6 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) static inline void fill_px(struct pl330_xfer *px, dma_addr_t dst, dma_addr_t src, size_t len) { - px->next = NULL; px->bytes = len; px->dst_addr = dst; px->src_addr = src; -- cgit v1.2.3-58-ga151 From be025329fd4223f3442fffc35f8255bd64d0d526 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:24 +0200 Subject: dmaengine: pl330: Remove unused mc_len field from _pl330_req struct The mc_len is initialized but its value is never read again, so remove it. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 80a17a84ab75..453ce106b5fc 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -398,8 +398,6 @@ enum pl330_cond { struct _pl330_req { u32 mc_bus; void *mc_cpu; - /* Number of bytes taken to setup MC for the req */ - u32 mc_len; struct pl330_req *r; }; @@ -999,7 +997,6 @@ static void mark_free(struct pl330_thread *thrd, int idx) struct _pl330_req *req = &thrd->req[idx]; _emit_END(0, req->mc_cpu); - req->mc_len = 0; thrd->req_running = -1; } @@ -1536,8 +1533,8 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r) /* Hook the request */ thrd->lstenq = idx; - thrd->req[idx].mc_len = _setup_req(0, thrd, idx, &xs); thrd->req[idx].r = r; + _setup_req(0, thrd, idx, &xs); ret = 0; -- cgit v1.2.3-58-ga151 From 6079d38ca21e80c13af2d8f8a7b062a0e28615c9 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:25 +0200 Subject: dmaengine: pl330: Remove useless xfer_cb indirection The xfer_cb callback of the pl330_req struct is always set to the same function. This adds an unnecessary step of indirection. Instead just call the callback function directly. 
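In miniature (a sketch of the before/after; the handler now takes the request itself and recovers the descriptor with container_of() instead of a token):

	/* before: one level of indirection through a pointer that could
	 * only ever be dma_pl330_rqcb */
	if (r && r->xfer_cb)
		r->xfer_cb(r->token, err);

	/* after: call the completion handler directly */
	dma_pl330_rqcb(r, err);
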
Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 62 +++++++++++++++++++++-------------------------------- 1 file changed, 24 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 453ce106b5fc..cf2522e429b0 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -351,10 +351,6 @@ struct pl330_req { enum dma_transfer_direction rqtype; /* Index of peripheral for the xfer. */ unsigned peri:5; - /* Unique token for this xfer, set by the client. */ - void *token; - /* Callback to be called after xfer. */ - void (*xfer_cb)(void *token, enum pl330_op_err err); /* If NULL, req will be done at last set parameters. */ struct pl330_reqcfg *cfg; /* Pointer to first xfer in the request. */ @@ -551,12 +547,6 @@ struct dma_pl330_desc { struct dma_pl330_chan *pchan; }; -static inline void _callback(struct pl330_req *r, enum pl330_op_err err) -{ - if (r && r->xfer_cb) - r->xfer_cb(r->token, err); -} - static inline bool _queue_empty(struct pl330_thread *thrd) { return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1])) @@ -1544,6 +1534,25 @@ xfer_exit: return ret; } +static void dma_pl330_rqcb(struct pl330_req *req, enum pl330_op_err err) +{ + struct dma_pl330_desc *desc = container_of(req, struct dma_pl330_desc, req); + struct dma_pl330_chan *pch = desc->pchan; + unsigned long flags; + + /* If desc aborted */ + if (!pch) + return; + + spin_lock_irqsave(&pch->lock, flags); + + desc->status = DONE; + + spin_unlock_irqrestore(&pch->lock, flags); + + tasklet_schedule(&pch->task); +} + static void pl330_dotask(unsigned long data) { struct pl330_dmac *pl330 = (struct pl330_dmac *) data; @@ -1585,10 +1594,8 @@ static void pl330_dotask(unsigned long data) err = PL330_ERR_ABORT; spin_unlock_irqrestore(&pl330->lock, flags); - - _callback(thrd->req[1 - thrd->lstenq].r, err); - _callback(thrd->req[thrd->lstenq].r, err); - + dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].r, err); + dma_pl330_rqcb(thrd->req[thrd->lstenq].r, err); spin_lock_irqsave(&pl330->lock, flags); thrd->req[0].r = NULL; @@ -1695,7 +1702,7 @@ static int pl330_update(const struct pl330_info *pi) list_del(&rqdone->rqd); spin_unlock_irqrestore(&pl330->lock, flags); - _callback(rqdone, PL330_ERR_NONE); + dma_pl330_rqcb(rqdone, PL330_ERR_NONE); spin_lock_irqsave(&pl330->lock, flags); } @@ -1852,8 +1859,8 @@ static void pl330_release_channel(void *ch_id) _stop(thrd); - _callback(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT); - _callback(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT); + dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT); + dma_pl330_rqcb(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT); pl330 = thrd->dmac; @@ -2207,25 +2214,6 @@ static void pl330_tasklet(unsigned long data) spin_unlock_irqrestore(&pch->lock, flags); } -static void dma_pl330_rqcb(void *token, enum pl330_op_err err) -{ - struct dma_pl330_desc *desc = token; - struct dma_pl330_chan *pch = desc->pchan; - unsigned long flags; - - /* If desc aborted */ - if (!pch) - return; - - spin_lock_irqsave(&pch->lock, flags); - - desc->status = DONE; - - spin_unlock_irqrestore(&pch->lock, flags); - - tasklet_schedule(&pch->task); -} - bool pl330_filter(struct dma_chan *chan, void *param) { u8 *peri_id; @@ -2417,12 +2405,10 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) static inline void _init_desc(struct dma_pl330_desc *desc) { desc->req.x = &desc->px; - desc->req.token = desc; desc->rqcfg.swap = SWAP_NO; desc->rqcfg.scctl = CCTRL0; desc->rqcfg.dcctl = 
CCTRL0; desc->req.cfg = &desc->rqcfg; - desc->req.xfer_cb = dma_pl330_rqcb; desc->txd.tx_submit = pl330_tx_submit; INIT_LIST_HEAD(&desc->node); -- cgit v1.2.3-58-ga151 From 65ad60600f5a3bf4c4edfbb95992af6ff2dfc18a Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:26 +0200 Subject: dmaengine: pl330: Change type pl330_chid from void * to struct pl330_thread * The pl330_chid field of the dma_pl330_chan struct always holds a pointer to the thread that is associated with the channel. Changing its type from void * to struct pl330_thread * makes things more type-safe and removes the need for unnecessary typecasts. While we are at it, also rename the field from the cryptic pl330_chid to thread. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index cf2522e429b0..f0e9f47e7dc6 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -495,10 +495,11 @@ struct dma_pl330_chan { /* To protect channel manipulation */ spinlock_t lock; - /* Token of a hardware channel thread of PL330 DMAC - * NULL if the channel is available to be acquired. + /* + * Hardware channel thread of PL330 DMAC. NULL if the channel is + * available. */ - void *pl330_chid; + struct pl330_thread *thread; /* For D-to-M and M-to-D channels */ int burst_sz; /* the peripheral fifo width */ @@ -1439,9 +1440,8 @@ static inline bool _is_valid(u32 ccr) * Client is not notified after each xfer unit, just once after all * xfer units are done or some error occurs. */ -static int pl330_submit_req(void *ch_id, struct pl330_req *r) +static int pl330_submit_req(struct pl330_thread *thrd, struct pl330_req *r) { - struct pl330_thread *thrd = ch_id; struct pl330_dmac *pl330; struct pl330_info *pi; struct _xfer_spec xs; @@ -1719,9 +1719,8 @@ updt_exit: return ret; } -static int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op) +static int pl330_chan_ctrl(struct pl330_thread *thrd, enum pl330_chan_op op) { - struct pl330_thread *thrd = ch_id; struct pl330_dmac *pl330; unsigned long flags; int ret = 0, active; @@ -1794,7 +1793,7 @@ static bool _chan_ns(const struct pl330_info *pi, int i) /* Upon success, returns IdentityToken for the * allocated channel, NULL otherwise.
*/ -static void *pl330_request_channel(const struct pl330_info *pi) +static struct pl330_thread *pl330_request_channel(const struct pl330_info *pi) { struct pl330_thread *thrd = NULL; struct pl330_dmac *pl330; @@ -1848,9 +1847,8 @@ static inline void _free_event(struct pl330_thread *thrd, int ev) pl330->events[ev] = -1; } -static void pl330_release_channel(void *ch_id) +static void pl330_release_channel(struct pl330_thread *thrd) { - struct pl330_thread *thrd = ch_id; struct pl330_dmac *pl330; unsigned long flags; @@ -2077,7 +2075,7 @@ static int dmac_free_threads(struct pl330_dmac *pl330) /* Release Channel threads */ for (i = 0; i < chans; i++) { thrd = &pl330->channels[i]; - pl330_release_channel((void *)thrd); + pl330_release_channel(thrd); } /* Free memory */ @@ -2146,8 +2144,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch) if (desc->status == BUSY) continue; - ret = pl330_submit_req(pch->pl330_chid, - &desc->req); + ret = pl330_submit_req(pch->thread, &desc->req); if (!ret) { desc->status = BUSY; } else if (ret == -EAGAIN) { @@ -2183,7 +2180,7 @@ static void pl330_tasklet(unsigned long data) fill_queue(pch); /* Make sure the PL330 Channel thread is active */ - pl330_chan_ctrl(pch->pl330_chid, PL330_OP_START); + pl330_chan_ctrl(pch->thread, PL330_OP_START); while (!list_empty(&pch->completed_list)) { dma_async_tx_callback callback; @@ -2254,8 +2251,8 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) dma_cookie_init(chan); pch->cyclic = false; - pch->pl330_chid = pl330_request_channel(&pdmac->pif); - if (!pch->pl330_chid) { + pch->thread = pl330_request_channel(&pdmac->pif); + if (!pch->thread) { spin_unlock_irqrestore(&pch->lock, flags); return -ENOMEM; } @@ -2281,7 +2278,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned spin_lock_irqsave(&pch->lock, flags); /* FLUSH the PL330 Channel thread */ - pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); + pl330_chan_ctrl(pch->thread, PL330_OP_FLUSH); /* Mark all desc done */ list_for_each_entry(desc, &pch->submitted_list, node) { @@ -2340,8 +2337,8 @@ static void pl330_free_chan_resources(struct dma_chan *chan) spin_lock_irqsave(&pch->lock, flags); - pl330_release_channel(pch->pl330_chid); - pch->pl330_chid = NULL; + pl330_release_channel(pch->thread); + pch->thread = NULL; if (pch->cyclic) list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); @@ -2887,7 +2884,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) INIT_LIST_HEAD(&pch->work_list); INIT_LIST_HEAD(&pch->completed_list); spin_lock_init(&pch->lock); - pch->pl330_chid = NULL; + pch->thread = NULL; pch->chan.device = pd; pch->dmac = pdmac; -- cgit v1.2.3-58-ga151 From 1b446d2a61c436f7ad542fcdb212d4e51845ac28 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:27 +0200 Subject: dmaengine: pl330: Remove unnecessary ccr validation We know that we do not create invalid ccr settings in this driver. There is no need to validate them.
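The invariant behind this: _init_desc() always sets a descriptor's scctl/dcctl cache-control fields to CCTRL0 before _prepare_ccr() encodes them into the CCR word, so the INVALID1/INVALID2 encodings that _is_valid() rejected are never generated by this driver. A rough, self-contained sketch of that reasoning; the shift values here are placeholders, not the hardware's real CC_SRCCCTRL_SHFT/CC_DSTCCTRL_SHFT positions:

#include <assert.h>

/* The driver's enum reserves two encodings as invalid. */
enum cachectrl { CCTRL0 = 0, INVALID1 = 4, INVALID2 = 5 };

/* Illustrative encoder standing in for _prepare_ccr(). */
static unsigned int prepare_ccr(enum cachectrl scctl, enum cachectrl dcctl)
{
	return ((unsigned int)scctl << 24) | ((unsigned int)dcctl << 28);
}

int main(void)
{
	/* Only CCTRL0 is ever fed in, so the encoded word can never
	 * carry an INVALID cache-control pattern. */
	unsigned int ccr = prepare_ccr(CCTRL0, CCTRL0);

	assert(((ccr >> 24) & 0x7) != INVALID1 && ((ccr >> 24) & 0x7) != INVALID2);
	assert(((ccr >> 28) & 0x7) != INVALID1 && ((ccr >> 28) & 0x7) != INVALID2);
	return 0;
}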
Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 23 ----------------------- 1 file changed, 23 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index f0e9f47e7dc6..0209823ca77f 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1420,21 +1420,6 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) return ccr; } -static inline bool _is_valid(u32 ccr) -{ - enum pl330_cachectrl dcctl; - enum pl330_cachectrl scctl; - - dcctl = (ccr >> CC_DSTCCTRL_SHFT) & CC_DRCCCTRL_MASK; - scctl = (ccr >> CC_SRCCCTRL_SHFT) & CC_SRCCCTRL_MASK; - - if (dcctl == INVALID1 || dcctl == INVALID2 - || scctl == INVALID1 || scctl == INVALID2) - return false; - else - return true; -} - /* * Submit a list of xfers after which the client wants notification. * Client is not notified after each xfer unit, just once after all @@ -1495,14 +1480,6 @@ static int pl330_submit_req(struct pl330_thread *thrd, struct pl330_req *r) ccr = readl(regs + CC(thrd->id)); } - /* If this req doesn't have valid xfer settings */ - if (!_is_valid(ccr)) { - ret = -EINVAL; - dev_info(thrd->dmac->pinfo->dev, "%s:%d Invalid CCR(%x)!\n", - __func__, __LINE__, ccr); - goto xfer_exit; - } - idx = IS_FREE(&thrd->req[0]) ? 0 : 1; xs.ccr = ccr; -- cgit v1.2.3-58-ga151 From fbbcd9be96a0295e9d127e124f72fa0039f53d8e Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:28 +0200 Subject: dmaengine: pl330: Simplify is_manager() Since we keep a pointer to the manager thread, it is fairly easy to check whether a given thread is the manager thread. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 0209823ca77f..c5eeb64dce6f 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -562,13 +562,7 @@ static inline bool is_manager(struct pl330_thread *thrd) { - struct pl330_dmac *pl330 = thrd->dmac; - - /* MANAGER is indexed at the end */ - if (thrd->id == pl330->pinfo->pcfg.num_chan) - return true; - else - return false; + return thrd->dmac->manager == thrd; } /* If manager of the thread is in Non-Secure mode */ -- cgit v1.2.3-58-ga151 From f6f2421c0a1cb6caffc85b13ab8f9bdd8f8278c3 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:29 +0200 Subject: dmaengine: pl330: Merge dma_pl330_dmac and pl330_dmac structs Both the dma_pl330_dmac and the pl330_dmac struct have the same lifetime, and their separation is a relic of this having been two different drivers in the past. Merging them into one struct makes the code a bit simpler, as it allows, for example, removing the pointers going back and forth between the two structs. While we are at it, also directly embed the pl330_info struct into the pl330_dmac struct, as this allows removing some more redundant fields.
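In outline, the change looks like this (field lists heavily abbreviated; the full struct is in the diff below):

/* Before: two structs with the same lifetime, tied together by pointers. */
struct pl330_dmac { struct pl330_info *pinfo; /* ... core state ... */ };
struct dma_pl330_dmac { struct pl330_info pif; /* ... dmaengine state ... */ };

/* After: a single struct owning both halves. */
struct pl330_dmac {
	struct dma_device ddma;		/* dmaengine state */
	void __iomem *base;		/* formerly in pl330_info */
	struct pl330_config pcfg;	/* formerly in pl330_info */
	/* ... threads, events, microcode buffers (core state) ... */
};

Helpers can then reach everything through one pointer, e.g. thrd->dmac->pcfg instead of thrd->dmac->pinfo->pcfg.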
Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 472 +++++++++++++++++++++------------------------------- 1 file changed, 192 insertions(+), 280 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index c5eeb64dce6f..e5fe9c764f53 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -282,20 +282,6 @@ struct pl330_config { u32 irq_ns; }; -/* Handle to the DMAC provided to the PL330 core */ -struct pl330_info { - /* Owning device */ - struct device *dev; - /* Size of MicroCode buffers for each channel. */ - unsigned mcbufsz; - /* ioremap'ed address of PL330 registers. */ - void __iomem *base; - /* PL330 core data, Client must not touch it. */ - void *pl330_data; - /* Populated by the PL330 core driver during pl330_add */ - struct pl330_config pcfg; -}; - /** * Request Configuration. * The PL330 core does not modify this and uses the last @@ -426,30 +412,6 @@ enum pl330_dmac_state { DYING, }; -/* A DMAC */ -struct pl330_dmac { - spinlock_t lock; - /* Holds list of reqs with due callbacks */ - struct list_head req_done; - /* Pointer to platform specific stuff */ - struct pl330_info *pinfo; - /* Maximum possible events/irqs */ - int events[32]; - /* BUS address of MicroCode buffer */ - dma_addr_t mcode_bus; - /* CPU address of MicroCode buffer */ - void *mcode_cpu; - /* List of all Channel threads */ - struct pl330_thread *channels; - /* Pointer to the MANAGER thread */ - struct pl330_thread *manager; - /* To handle bad news in interrupt */ - struct tasklet_struct tasks; - struct _pl330_tbd dmac_tbd; - /* State of DMAC operation */ - enum pl330_dmac_state state; -}; - enum desc_status { /* In the DMAC pool */ FREE, @@ -490,7 +452,7 @@ struct dma_pl330_chan { * As the parent, this DMAC also provides descriptors * to the channel. */ - struct dma_pl330_dmac *dmac; + struct pl330_dmac *dmac; /* To protect channel manipulation */ spinlock_t lock; @@ -510,9 +472,7 @@ struct dma_pl330_chan { bool cyclic; }; -struct dma_pl330_dmac { - struct pl330_info pif; - +struct pl330_dmac { /* DMA-Engine Device */ struct dma_device ddma; @@ -524,6 +484,32 @@ struct dma_pl330_dmac { /* To protect desc_pool manipulation */ spinlock_t pool_lock; + /* Size of MicroCode buffers for each channel. */ + unsigned mcbufsz; + /* ioremap'ed address of PL330 registers. */ + void __iomem *base; + /* Populated by the PL330 core driver during pl330_add */ + struct pl330_config pcfg; + + spinlock_t lock; + /* Maximum possible events/irqs */ + int events[32]; + /* BUS address of MicroCode buffer */ + dma_addr_t mcode_bus; + /* CPU address of MicroCode buffer */ + void *mcode_cpu; + /* List of all Channel threads */ + struct pl330_thread *channels; + /* Pointer to the MANAGER thread */ + struct pl330_thread *manager; + /* To handle bad news in interrupt */ + struct tasklet_struct tasks; + struct _pl330_tbd dmac_tbd; + /* State of DMAC operation */ + enum pl330_dmac_state state; + /* Holds list of reqs with due callbacks */ + struct list_head req_done; + /* Peripheral channels connected to this DMAC */ unsigned int num_peripherals; struct dma_pl330_chan *peripherals; /* keep at end */ @@ -568,9 +554,7 @@ static inline bool is_manager(struct pl330_thread *thrd) /* If manager of the thread is in Non-Secure mode */ static inline bool _manager_ns(struct pl330_thread *thrd) { - struct pl330_dmac *pl330 = thrd->dmac; - - return (pl330->pinfo->pcfg.mode & DMAC_MODE_NS) ? true : false; + return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? 
true : false; } static inline u32 get_revision(u32 periph_id) @@ -928,7 +912,7 @@ static inline u32 _emit_GO(unsigned dry_run, u8 buf[], /* Returns Time-Out */ static bool _until_dmac_idle(struct pl330_thread *thrd) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; unsigned long loops = msecs_to_loops(5); do { @@ -948,7 +932,7 @@ static bool _until_dmac_idle(struct pl330_thread *thrd) static inline void _execute_DBGINSN(struct pl330_thread *thrd, u8 insn[], bool as_manager) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; u32 val; val = (insn[0] << 16) | (insn[1] << 24); @@ -963,7 +947,7 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd, /* If timed out due to halted state-machine */ if (_until_dmac_idle(thrd)) { - dev_err(thrd->dmac->pinfo->dev, "DMAC halted!\n"); + dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n"); return; } @@ -988,7 +972,7 @@ static void mark_free(struct pl330_thread *thrd, int idx) static inline u32 _state(struct pl330_thread *thrd) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; u32 val; if (is_manager(thrd)) @@ -1046,7 +1030,7 @@ static inline u32 _state(struct pl330_thread *thrd) static void _stop(struct pl330_thread *thrd) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; u8 insn[6] = {0, 0, 0, 0, 0, 0}; if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) @@ -1069,7 +1053,7 @@ static void _stop(struct pl330_thread *thrd) /* Start doing req 'idx' of thread 'thrd' */ static bool _trigger(struct pl330_thread *thrd) { - void __iomem *regs = thrd->dmac->pinfo->base; + void __iomem *regs = thrd->dmac->base; struct _pl330_req *req; struct pl330_req *r; struct _arg_GO go; @@ -1107,7 +1091,7 @@ static bool _trigger(struct pl330_thread *thrd) /* See 'Abort Sources' point-4 at Page 2-25 */ if (_manager_ns(thrd) && !ns) - dev_info(thrd->dmac->pinfo->dev, "%s:%d Recipe for ABORT!\n", + dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n", __func__, __LINE__); go.chan = thrd->id; @@ -1421,8 +1405,7 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) */ static int pl330_submit_req(struct pl330_thread *thrd, struct pl330_req *r) { - struct pl330_dmac *pl330; - struct pl330_info *pi; + struct pl330_dmac *pl330 = thrd->dmac; struct _xfer_spec xs; unsigned long flags; void __iomem *regs; @@ -1434,20 +1417,18 @@ static int pl330_submit_req(struct pl330_thread *thrd, struct pl330_req *r) if (!r || !thrd || thrd->free) return -EINVAL; - pl330 = thrd->dmac; - pi = pl330->pinfo; - regs = pi->base; + regs = thrd->dmac->base; if (pl330->state == DYING || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { - dev_info(thrd->dmac->pinfo->dev, "%s:%d\n", + dev_info(thrd->dmac->ddma.dev, "%s:%d\n", __func__, __LINE__); return -EAGAIN; } /* If request for non-existing peripheral */ - if (r->rqtype != DMA_MEM_TO_MEM && r->peri >= pi->pcfg.num_peri) { - dev_info(thrd->dmac->pinfo->dev, + if (r->rqtype != DMA_MEM_TO_MEM && r->peri >= pl330->pcfg.num_peri) { + dev_info(thrd->dmac->ddma.dev, "%s:%d Invalid peripheral(%u)!\n", __func__, __LINE__, r->peri); return -EINVAL; @@ -1484,9 +1465,8 @@ static int pl330_submit_req(struct pl330_thread *thrd, struct pl330_req *r) if (ret < 0) goto xfer_exit; - if (ret > pi->mcbufsz / 2) { - dev_info(thrd->dmac->pinfo->dev, - "%s:%d Trying increasing mcbufsz\n", + if (ret > pl330->mcbufsz / 2) { + dev_info(pl330->ddma.dev, "%s:%d Trying increasing mcbufsz\n", __func__, 
__LINE__); ret = -ENOMEM; goto xfer_exit; @@ -1527,7 +1507,6 @@ static void dma_pl330_rqcb(struct pl330_req *req, enum pl330_op_err err) static void pl330_dotask(unsigned long data) { struct pl330_dmac *pl330 = (struct pl330_dmac *) data; - struct pl330_info *pi = pl330->pinfo; unsigned long flags; int i; @@ -1545,16 +1524,16 @@ static void pl330_dotask(unsigned long data) if (pl330->dmac_tbd.reset_mngr) { _stop(pl330->manager); /* Reset all channels */ - pl330->dmac_tbd.reset_chan = (1 << pi->pcfg.num_chan) - 1; + pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1; /* Clear the reset flag */ pl330->dmac_tbd.reset_mngr = false; } - for (i = 0; i < pi->pcfg.num_chan; i++) { + for (i = 0; i < pl330->pcfg.num_chan; i++) { if (pl330->dmac_tbd.reset_chan & (1 << i)) { struct pl330_thread *thrd = &pl330->channels[i]; - void __iomem *regs = pi->base; + void __iomem *regs = pl330->base; enum pl330_op_err err; _stop(thrd); @@ -1585,20 +1564,15 @@ static void pl330_dotask(unsigned long data) } /* Returns 1 if state was updated, 0 otherwise */ -static int pl330_update(const struct pl330_info *pi) +static int pl330_update(struct pl330_dmac *pl330) { struct pl330_req *rqdone, *tmp; - struct pl330_dmac *pl330; unsigned long flags; void __iomem *regs; u32 val; int id, ev, ret = 0; - if (!pi || !pi->pl330_data) - return 0; - - regs = pi->base; - pl330 = pi->pl330_data; + regs = pl330->base; spin_lock_irqsave(&pl330->lock, flags); @@ -1608,13 +1582,13 @@ static int pl330_update(const struct pl330_info *pi) else pl330->dmac_tbd.reset_mngr = false; - val = readl(regs + FSC) & ((1 << pi->pcfg.num_chan) - 1); + val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1); pl330->dmac_tbd.reset_chan |= val; if (val) { int i = 0; - while (i < pi->pcfg.num_chan) { + while (i < pl330->pcfg.num_chan) { if (val & (1 << i)) { - dev_info(pi->dev, + dev_info(pl330->ddma.dev, "Reset Channel-%d\t CS-%x FTC-%x\n", i, readl(regs + CS(i)), readl(regs + FTC(i))); @@ -1626,15 +1600,16 @@ static int pl330_update(const struct pl330_info *pi) /* Check which event happened i.e, thread notified */ val = readl(regs + ES); - if (pi->pcfg.num_events < 32 - && val & ~((1 << pi->pcfg.num_events) - 1)) { + if (pl330->pcfg.num_events < 32 + && val & ~((1 << pl330->pcfg.num_events) - 1)) { pl330->dmac_tbd.reset_dmac = true; - dev_err(pi->dev, "%s:%d Unexpected!\n", __func__, __LINE__); + dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__, + __LINE__); ret = 1; goto updt_exit; } - for (ev = 0; ev < pi->pcfg.num_events; ev++) { + for (ev = 0; ev < pl330->pcfg.num_events; ev++) { if (val & (1 << ev)) { /* Event occurred */ struct pl330_thread *thrd; u32 inten = readl(regs + INTEN); @@ -1744,10 +1719,9 @@ static int pl330_chan_ctrl(struct pl330_thread *thrd, enum pl330_chan_op op) static inline int _alloc_event(struct pl330_thread *thrd) { struct pl330_dmac *pl330 = thrd->dmac; - struct pl330_info *pi = pl330->pinfo; int ev; - for (ev = 0; ev < pi->pcfg.num_events; ev++) + for (ev = 0; ev < pl330->pcfg.num_events; ev++) if (pl330->events[ev] == -1) { pl330->events[ev] = thrd->id; return ev; @@ -1756,37 +1730,31 @@ static inline int _alloc_event(struct pl330_thread *thrd) return -1; } -static bool _chan_ns(const struct pl330_info *pi, int i) +static bool _chan_ns(const struct pl330_dmac *pl330, int i) { - return pi->pcfg.irq_ns & (1 << i); + return pl330->pcfg.irq_ns & (1 << i); } /* Upon success, returns IdentityToken for the * allocated channel, NULL otherwise. 
*/ -static struct pl330_thread *pl330_request_channel(const struct pl330_info *pi) +static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) { struct pl330_thread *thrd = NULL; - struct pl330_dmac *pl330; unsigned long flags; int chans, i; - if (!pi || !pi->pl330_data) - return NULL; - - pl330 = pi->pl330_data; - if (pl330->state == DYING) return NULL; - chans = pi->pcfg.num_chan; + chans = pl330->pcfg.num_chan; spin_lock_irqsave(&pl330->lock, flags); for (i = 0; i < chans; i++) { thrd = &pl330->channels[i]; if ((thrd->free) && (!_manager_ns(thrd) || - _chan_ns(pi, i))) { + _chan_ns(pl330, i))) { thrd->ev = _alloc_event(thrd); if (thrd->ev >= 0) { thrd->free = false; @@ -1810,10 +1778,9 @@ static struct pl330_thread *pl330_request_channel(const struct pl330_info *pi) static inline void _free_event(struct pl330_thread *thrd, int ev) { struct pl330_dmac *pl330 = thrd->dmac; - struct pl330_info *pi = pl330->pinfo; /* If the event is valid and was held by the thread */ - if (ev >= 0 && ev < pi->pcfg.num_events + if (ev >= 0 && ev < pl330->pcfg.num_events && pl330->events[ev] == thrd->id) pl330->events[ev] = -1; } @@ -1842,72 +1809,70 @@ static void pl330_release_channel(struct pl330_thread *thrd) /* Initialize the structure for PL330 configuration, that can be used * by the client driver the make best use of the DMAC */ -static void read_dmac_config(struct pl330_info *pi) +static void read_dmac_config(struct pl330_dmac *pl330) { - void __iomem *regs = pi->base; + void __iomem *regs = pl330->base; u32 val; val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT; val &= CRD_DATA_WIDTH_MASK; - pi->pcfg.data_bus_width = 8 * (1 << val); + pl330->pcfg.data_bus_width = 8 * (1 << val); val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT; val &= CRD_DATA_BUFF_MASK; - pi->pcfg.data_buf_dep = val + 1; + pl330->pcfg.data_buf_dep = val + 1; val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT; val &= CR0_NUM_CHANS_MASK; val += 1; - pi->pcfg.num_chan = val; + pl330->pcfg.num_chan = val; val = readl(regs + CR0); if (val & CR0_PERIPH_REQ_SET) { val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK; val += 1; - pi->pcfg.num_peri = val; - pi->pcfg.peri_ns = readl(regs + CR4); + pl330->pcfg.num_peri = val; + pl330->pcfg.peri_ns = readl(regs + CR4); } else { - pi->pcfg.num_peri = 0; + pl330->pcfg.num_peri = 0; } val = readl(regs + CR0); if (val & CR0_BOOT_MAN_NS) - pi->pcfg.mode |= DMAC_MODE_NS; + pl330->pcfg.mode |= DMAC_MODE_NS; else - pi->pcfg.mode &= ~DMAC_MODE_NS; + pl330->pcfg.mode &= ~DMAC_MODE_NS; val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT; val &= CR0_NUM_EVENTS_MASK; val += 1; - pi->pcfg.num_events = val; + pl330->pcfg.num_events = val; - pi->pcfg.irq_ns = readl(regs + CR3); + pl330->pcfg.irq_ns = readl(regs + CR3); } static inline void _reset_thread(struct pl330_thread *thrd) { struct pl330_dmac *pl330 = thrd->dmac; - struct pl330_info *pi = pl330->pinfo; thrd->req[0].mc_cpu = pl330->mcode_cpu - + (thrd->id * pi->mcbufsz); + + (thrd->id * pl330->mcbufsz); thrd->req[0].mc_bus = pl330->mcode_bus - + (thrd->id * pi->mcbufsz); + + (thrd->id * pl330->mcbufsz); thrd->req[0].r = NULL; mark_free(thrd, 0); thrd->req[1].mc_cpu = thrd->req[0].mc_cpu - + pi->mcbufsz / 2; + + pl330->mcbufsz / 2; thrd->req[1].mc_bus = thrd->req[0].mc_bus - + pi->mcbufsz / 2; + + pl330->mcbufsz / 2; thrd->req[1].r = NULL; mark_free(thrd, 1); } static int dmac_alloc_threads(struct pl330_dmac *pl330) { - struct pl330_info *pi = pl330->pinfo; - int chans = pi->pcfg.num_chan; + int chans = pl330->pcfg.num_chan; struct 
pl330_thread *thrd; int i; @@ -1938,29 +1903,28 @@ static int dmac_alloc_threads(struct pl330_dmac *pl330) static int dmac_alloc_resources(struct pl330_dmac *pl330) { - struct pl330_info *pi = pl330->pinfo; - int chans = pi->pcfg.num_chan; + int chans = pl330->pcfg.num_chan; int ret; /* * Alloc MicroCode buffer for 'chans' Channel threads. * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) */ - pl330->mcode_cpu = dma_alloc_coherent(pi->dev, - chans * pi->mcbufsz, + pl330->mcode_cpu = dma_alloc_coherent(pl330->ddma.dev, + chans * pl330->mcbufsz, &pl330->mcode_bus, GFP_KERNEL); if (!pl330->mcode_cpu) { - dev_err(pi->dev, "%s:%d Can't allocate memory!\n", + dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n", __func__, __LINE__); return -ENOMEM; } ret = dmac_alloc_threads(pl330); if (ret) { - dev_err(pi->dev, "%s:%d Can't to create channels for DMAC!\n", + dev_err(pl330->ddma.dev, "%s:%d Can't to create channels for DMAC!\n", __func__, __LINE__); - dma_free_coherent(pi->dev, - chans * pi->mcbufsz, + dma_free_coherent(pl330->ddma.dev, + chans * pl330->mcbufsz, pl330->mcode_cpu, pl330->mcode_bus); return ret; } @@ -1968,64 +1932,45 @@ static int dmac_alloc_resources(struct pl330_dmac *pl330) return 0; } -static int pl330_add(struct pl330_info *pi) +static int pl330_add(struct pl330_dmac *pl330) { - struct pl330_dmac *pl330; void __iomem *regs; int i, ret; - if (!pi || !pi->dev) - return -EINVAL; - - /* If already added */ - if (pi->pl330_data) - return -EINVAL; - - regs = pi->base; + regs = pl330->base; /* Check if we can handle this DMAC */ - if ((pi->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { - dev_err(pi->dev, "PERIPH_ID 0x%x !\n", pi->pcfg.periph_id); + if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { + dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n", + pl330->pcfg.periph_id); return -EINVAL; } /* Read the configuration of the DMAC */ - read_dmac_config(pi); + read_dmac_config(pl330); - if (pi->pcfg.num_events == 0) { - dev_err(pi->dev, "%s:%d Can't work without events!\n", + if (pl330->pcfg.num_events == 0) { + dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n", __func__, __LINE__); return -EINVAL; } - pl330 = kzalloc(sizeof(*pl330), GFP_KERNEL); - if (!pl330) { - dev_err(pi->dev, "%s:%d Can't allocate memory!\n", - __func__, __LINE__); - return -ENOMEM; - } - - /* Assign the info structure and private data */ - pl330->pinfo = pi; - pi->pl330_data = pl330; - spin_lock_init(&pl330->lock); INIT_LIST_HEAD(&pl330->req_done); /* Use default MC buffer size if not provided */ - if (!pi->mcbufsz) - pi->mcbufsz = MCODE_BUFF_PER_REQ * 2; + if (!pl330->mcbufsz) + pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2; /* Mark all events as free */ - for (i = 0; i < pi->pcfg.num_events; i++) + for (i = 0; i < pl330->pcfg.num_events; i++) pl330->events[i] = -1; /* Allocate resources needed by the DMAC */ ret = dmac_alloc_resources(pl330); if (ret) { - dev_err(pi->dev, "Unable to create channels for DMAC\n"); - kfree(pl330); + dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n"); return ret; } @@ -2038,13 +1983,11 @@ static int pl330_add(struct pl330_info *pi) static int dmac_free_threads(struct pl330_dmac *pl330) { - struct pl330_info *pi = pl330->pinfo; - int chans = pi->pcfg.num_chan; struct pl330_thread *thrd; int i; /* Release Channel threads */ - for (i = 0; i < chans; i++) { + for (i = 0; i < pl330->pcfg.num_chan; i++) { thrd = &pl330->channels[i]; pl330_release_channel(thrd); } @@ -2055,35 +1998,18 @@ static int dmac_free_threads(struct pl330_dmac *pl330) 
return 0; } -static void dmac_free_resources(struct pl330_dmac *pl330) -{ - struct pl330_info *pi = pl330->pinfo; - int chans = pi->pcfg.num_chan; - - dmac_free_threads(pl330); - - dma_free_coherent(pi->dev, chans * pi->mcbufsz, - pl330->mcode_cpu, pl330->mcode_bus); -} - -static void pl330_del(struct pl330_info *pi) +static void pl330_del(struct pl330_dmac *pl330) { - struct pl330_dmac *pl330; - - if (!pi || !pi->pl330_data) - return; - - pl330 = pi->pl330_data; - pl330->state = UNINIT; tasklet_kill(&pl330->tasks); /* Free DMAC resources */ - dmac_free_resources(pl330); + dmac_free_threads(pl330); - kfree(pl330); - pi->pl330_data = NULL; + dma_free_coherent(pl330->ddma.dev, + pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu, + pl330->mcode_bus); } /* forward declaration */ @@ -2124,7 +2050,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch) } else { /* Unacceptable request */ desc->status = DONE; - dev_err(pch->dmac->pif.dev, "%s:%d Bad Desc(%d)\n", + dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n", __func__, __LINE__, desc->txd.cookie); tasklet_schedule(&pch->task); } @@ -2198,23 +2124,26 @@ static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { int count = dma_spec->args_count; - struct dma_pl330_dmac *pdmac = ofdma->of_dma_data; + struct pl330_dmac *pl330 = ofdma->of_dma_data; unsigned int chan_id; + if (!pl330) + return NULL; + if (count != 1) return NULL; chan_id = dma_spec->args[0]; - if (chan_id >= pdmac->num_peripherals) + if (chan_id >= pl330->num_peripherals) return NULL; - return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan); + return dma_get_slave_channel(&pl330->peripherals[chan_id].chan); } static int pl330_alloc_chan_resources(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); - struct dma_pl330_dmac *pdmac = pch->dmac; + struct pl330_dmac *pl330 = pch->dmac; unsigned long flags; spin_lock_irqsave(&pch->lock, flags); @@ -2222,7 +2151,7 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) dma_cookie_init(chan); pch->cyclic = false; - pch->thread = pl330_request_channel(&pdmac->pif); + pch->thread = pl330_request_channel(pl330); if (!pch->thread) { spin_unlock_irqrestore(&pch->lock, flags); return -ENOMEM; @@ -2240,7 +2169,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned struct dma_pl330_chan *pch = to_pchan(chan); struct dma_pl330_desc *desc; unsigned long flags; - struct dma_pl330_dmac *pdmac = pch->dmac; + struct pl330_dmac *pl330 = pch->dmac; struct dma_slave_config *slave_config; LIST_HEAD(list); @@ -2267,9 +2196,9 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned dma_cookie_complete(&desc->txd); } - list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool); - list_splice_tail_init(&pch->work_list, &pdmac->desc_pool); - list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool); + list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); + list_splice_tail_init(&pch->work_list, &pl330->desc_pool); + list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); spin_unlock_irqrestore(&pch->lock, flags); break; case DMA_SLAVE_CONFIG: @@ -2292,7 +2221,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned } break; default: - dev_err(pch->dmac->pif.dev, "Not supported command.\n"); + dev_err(pch->dmac->ddma.dev, "Not supported command.\n"); return -ENXIO; } @@ -2383,44 +2312,37 @@ static inline void _init_desc(struct dma_pl330_desc *desc) } /* Returns 
the number of descriptors added to the DMAC pool */ -static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) +static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count) { struct dma_pl330_desc *desc; unsigned long flags; int i; - if (!pdmac) - return 0; - desc = kcalloc(count, sizeof(*desc), flg); if (!desc) return 0; - spin_lock_irqsave(&pdmac->pool_lock, flags); + spin_lock_irqsave(&pl330->pool_lock, flags); for (i = 0; i < count; i++) { _init_desc(&desc[i]); - list_add_tail(&desc[i].node, &pdmac->desc_pool); + list_add_tail(&desc[i].node, &pl330->desc_pool); } - spin_unlock_irqrestore(&pdmac->pool_lock, flags); + spin_unlock_irqrestore(&pl330->pool_lock, flags); return count; } -static struct dma_pl330_desc * -pluck_desc(struct dma_pl330_dmac *pdmac) +static struct dma_pl330_desc *pluck_desc(struct pl330_dmac *pl330) { struct dma_pl330_desc *desc = NULL; unsigned long flags; - if (!pdmac) - return NULL; + spin_lock_irqsave(&pl330->pool_lock, flags); - spin_lock_irqsave(&pdmac->pool_lock, flags); - - if (!list_empty(&pdmac->desc_pool)) { - desc = list_entry(pdmac->desc_pool.next, + if (!list_empty(&pl330->desc_pool)) { + desc = list_entry(pl330->desc_pool.next, struct dma_pl330_desc, node); list_del_init(&desc->node); @@ -2429,29 +2351,29 @@ pluck_desc(struct dma_pl330_dmac *pdmac) desc->txd.callback = NULL; } - spin_unlock_irqrestore(&pdmac->pool_lock, flags); + spin_unlock_irqrestore(&pl330->pool_lock, flags); return desc; } static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) { - struct dma_pl330_dmac *pdmac = pch->dmac; + struct pl330_dmac *pl330 = pch->dmac; u8 *peri_id = pch->chan.private; struct dma_pl330_desc *desc; /* Pluck one desc from the pool of DMAC */ - desc = pluck_desc(pdmac); + desc = pluck_desc(pl330); /* If the DMAC pool is empty, alloc new */ if (!desc) { - if (!add_desc(pdmac, GFP_ATOMIC, 1)) + if (!add_desc(pl330, GFP_ATOMIC, 1)) return NULL; /* Try again */ - desc = pluck_desc(pdmac); + desc = pluck_desc(pl330); if (!desc) { - dev_err(pch->dmac->pif.dev, + dev_err(pch->dmac->ddma.dev, "%s:%d ALERT!\n", __func__, __LINE__); return NULL; } @@ -2463,7 +2385,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) async_tx_ack(&desc->txd); desc->req.peri = peri_id ? 
pch->chan.chan_id : 0; - desc->rqcfg.pcfg = &pch->dmac->pif.pcfg; + desc->rqcfg.pcfg = &pch->dmac->pcfg; dma_async_tx_descriptor_init(&desc->txd, &pch->chan); @@ -2485,7 +2407,7 @@ __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst, struct dma_pl330_desc *desc = pl330_get_desc(pch); if (!desc) { - dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", + dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", __func__, __LINE__); return NULL; } @@ -2509,11 +2431,11 @@ __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst, static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) { struct dma_pl330_chan *pch = desc->pchan; - struct pl330_info *pi = &pch->dmac->pif; + struct pl330_dmac *pl330 = pch->dmac; int burst_len; - burst_len = pi->pcfg.data_bus_width / 8; - burst_len *= pi->pcfg.data_buf_dep; + burst_len = pl330->pcfg.data_bus_width / 8; + burst_len *= pl330->pcfg.data_buf_dep; burst_len >>= desc->rqcfg.brst_size; /* src/dst_burst_len can't be more than 16 */ @@ -2536,7 +2458,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( { struct dma_pl330_desc *desc = NULL, *first = NULL; struct dma_pl330_chan *pch = to_pchan(chan); - struct dma_pl330_dmac *pdmac = pch->dmac; + struct pl330_dmac *pl330 = pch->dmac; unsigned int i; dma_addr_t dst; dma_addr_t src; @@ -2545,7 +2467,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( return NULL; if (!is_slave_direction(direction)) { - dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", + dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n", __func__, __LINE__); return NULL; } @@ -2553,23 +2475,23 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( for (i = 0; i < len / period_len; i++) { desc = pl330_get_desc(pch); if (!desc) { - dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", + dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", __func__, __LINE__); if (!first) return NULL; - spin_lock_irqsave(&pdmac->pool_lock, flags); + spin_lock_irqsave(&pl330->pool_lock, flags); while (!list_empty(&first->node)) { desc = list_entry(first->node.next, struct dma_pl330_desc, node); - list_move_tail(&desc->node, &pdmac->desc_pool); + list_move_tail(&desc->node, &pl330->desc_pool); } - list_move_tail(&first->node, &pdmac->desc_pool); + list_move_tail(&first->node, &pl330->desc_pool); - spin_unlock_irqrestore(&pdmac->pool_lock, flags); + spin_unlock_irqrestore(&pl330->pool_lock, flags); return NULL; } @@ -2619,14 +2541,12 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, { struct dma_pl330_desc *desc; struct dma_pl330_chan *pch = to_pchan(chan); - struct pl330_info *pi; + struct pl330_dmac *pl330 = pch->dmac; int burst; if (unlikely(!pch || !len)) return NULL; - pi = &pch->dmac->pif; - desc = __pl330_prep_dma_memcpy(pch, dst, src, len); if (!desc) return NULL; @@ -2636,7 +2556,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, desc->req.rqtype = DMA_MEM_TO_MEM; /* Select max possible burst size */ - burst = pi->pcfg.data_bus_width / 8; + burst = pl330->pcfg.data_bus_width / 8; while (burst > 1) { if (!(len % burst)) @@ -2655,7 +2575,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, return &desc->txd; } -static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac, +static void __pl330_giveback_desc(struct pl330_dmac *pl330, struct dma_pl330_desc *first) { unsigned long flags; @@ -2664,17 +2584,17 @@ static void __pl330_giveback_desc(struct dma_pl330_dmac *pdmac, if (!first) return; - 
spin_lock_irqsave(&pdmac->pool_lock, flags); + spin_lock_irqsave(&pl330->pool_lock, flags); while (!list_empty(&first->node)) { desc = list_entry(first->node.next, struct dma_pl330_desc, node); - list_move_tail(&desc->node, &pdmac->desc_pool); + list_move_tail(&desc->node, &pl330->desc_pool); } - list_move_tail(&first->node, &pdmac->desc_pool); + list_move_tail(&first->node, &pl330->desc_pool); - spin_unlock_irqrestore(&pdmac->pool_lock, flags); + spin_unlock_irqrestore(&pl330->pool_lock, flags); } static struct dma_async_tx_descriptor * @@ -2699,12 +2619,12 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc = pl330_get_desc(pch); if (!desc) { - struct dma_pl330_dmac *pdmac = pch->dmac; + struct pl330_dmac *pl330 = pch->dmac; - dev_err(pch->dmac->pif.dev, + dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", __func__, __LINE__); - __pl330_giveback_desc(pdmac, first); + __pl330_giveback_desc(pl330, first); return NULL; } @@ -2768,9 +2688,9 @@ static int pl330_probe(struct amba_device *adev, const struct amba_id *id) { struct dma_pl330_platdata *pdat; - struct dma_pl330_dmac *pdmac; + struct pl330_config *pcfg; + struct pl330_dmac *pl330; struct dma_pl330_chan *pch, *_p; - struct pl330_info *pi; struct dma_device *pd; struct resource *res; int i, ret, irq; @@ -2783,30 +2703,27 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) return ret; /* Allocate a new DMAC and its Channels */ - pdmac = devm_kzalloc(&adev->dev, sizeof(*pdmac), GFP_KERNEL); - if (!pdmac) { + pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL); + if (!pl330) { dev_err(&adev->dev, "unable to allocate mem\n"); return -ENOMEM; } - pi = &pdmac->pif; - pi->dev = &adev->dev; - pi->pl330_data = NULL; - pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; + pl330->mcbufsz = pdat ? 
pdat->mcbuf_sz : 0; res = &adev->res; - pi->base = devm_ioremap_resource(&adev->dev, res); - if (IS_ERR(pi->base)) - return PTR_ERR(pi->base); + pl330->base = devm_ioremap_resource(&adev->dev, res); + if (IS_ERR(pl330->base)) + return PTR_ERR(pl330->base); - amba_set_drvdata(adev, pdmac); + amba_set_drvdata(adev, pl330); for (i = 0; i < AMBA_NR_IRQS; i++) { irq = adev->irq[i]; if (irq) { ret = devm_request_irq(&adev->dev, irq, pl330_irq_handler, 0, - dev_name(&adev->dev), pi); + dev_name(&adev->dev), pl330); if (ret) return ret; } else { @@ -2814,38 +2731,40 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) } } - pi->pcfg.periph_id = adev->periphid; - ret = pl330_add(pi); + pcfg = &pl330->pcfg; + + pcfg->periph_id = adev->periphid; + ret = pl330_add(pl330); if (ret) return ret; - INIT_LIST_HEAD(&pdmac->desc_pool); - spin_lock_init(&pdmac->pool_lock); + INIT_LIST_HEAD(&pl330->desc_pool); + spin_lock_init(&pl330->pool_lock); /* Create a descriptor pool of default size */ - if (!add_desc(pdmac, GFP_KERNEL, NR_DEFAULT_DESC)) + if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC)) dev_warn(&adev->dev, "unable to allocate desc\n"); - pd = &pdmac->ddma; + pd = &pl330->ddma; INIT_LIST_HEAD(&pd->channels); /* Initialize channel parameters */ if (pdat) - num_chan = max_t(int, pdat->nr_valid_peri, pi->pcfg.num_chan); + num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan); else - num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan); + num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan); - pdmac->num_peripherals = num_chan; + pl330->num_peripherals = num_chan; - pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); - if (!pdmac->peripherals) { + pl330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); + if (!pl330->peripherals) { ret = -ENOMEM; - dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); + dev_err(&adev->dev, "unable to allocate pl330->peripherals\n"); goto probe_err2; } for (i = 0; i < num_chan; i++) { - pch = &pdmac->peripherals[i]; + pch = &pl330->peripherals[i]; if (!adev->dev.of_node) pch->chan.private = pdat ? 
&pdat->peri_id[i] : NULL; else @@ -2857,7 +2776,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) spin_lock_init(&pch->lock); pch->thread = NULL; pch->chan.device = pd; - pch->dmac = pdmac; + pch->dmac = pl330; /* Add the channel to the DMAC list */ list_add_tail(&pch->chan.device_node, &pd->channels); @@ -2868,7 +2787,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pd->cap_mask = pdat->cap_mask; } else { dma_cap_set(DMA_MEMCPY, pd->cap_mask); - if (pi->pcfg.num_peri) { + if (pcfg->num_peri) { dma_cap_set(DMA_SLAVE, pd->cap_mask); dma_cap_set(DMA_CYCLIC, pd->cap_mask); dma_cap_set(DMA_PRIVATE, pd->cap_mask); @@ -2893,14 +2812,14 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) if (adev->dev.of_node) { ret = of_dma_controller_register(adev->dev.of_node, - of_dma_pl330_xlate, pdmac); + of_dma_pl330_xlate, pl330); if (ret) { dev_err(&adev->dev, "unable to register DMA to the generic DT DMA helpers\n"); } } - adev->dev.dma_parms = &pdmac->dma_parms; + adev->dev.dma_parms = &pl330->dma_parms; /* * This is the limit for transfers with a buswidth of 1, larger @@ -2915,14 +2834,13 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) "Loaded driver for PL330 DMAC-%d\n", adev->periphid); dev_info(&adev->dev, "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", - pi->pcfg.data_buf_dep, - pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan, - pi->pcfg.num_peri, pi->pcfg.num_events); + pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan, + pcfg->num_peri, pcfg->num_events); return 0; probe_err3: /* Idle the DMAC */ - list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, + list_for_each_entry_safe(pch, _p, &pl330->ddma.channels, chan.device_node) { /* Remove the channel */ @@ -2933,27 +2851,23 @@ probe_err3: pl330_free_chan_resources(&pch->chan); } probe_err2: - pl330_del(pi); + pl330_del(pl330); return ret; } static int pl330_remove(struct amba_device *adev) { - struct dma_pl330_dmac *pdmac = amba_get_drvdata(adev); + struct pl330_dmac *pl330 = amba_get_drvdata(adev); struct dma_pl330_chan *pch, *_p; - struct pl330_info *pi; - - if (!pdmac) - return 0; if (adev->dev.of_node) of_dma_controller_free(adev->dev.of_node); - dma_async_device_unregister(&pdmac->ddma); + dma_async_device_unregister(&pl330->ddma); /* Idle the DMAC */ - list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels, + list_for_each_entry_safe(pch, _p, &pl330->ddma.channels, chan.device_node) { /* Remove the channel */ @@ -2964,9 +2878,7 @@ static int pl330_remove(struct amba_device *adev) pl330_free_chan_resources(&pch->chan); } - pi = &pdmac->pif; - - pl330_del(pi); + pl330_del(pl330); return 0; } -- cgit v1.2.3-58-ga151 From 9dc5a315fe515e92f40c387ae15f8b760568834e Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:30 +0200 Subject: dmaengine: pl330: Embed pl330_req directly into dma_pl330_desc The pl330_req struct is embedded into the dma_pl330_desc struct, but half of its fields are pointers to other fields of the dma_pl330_desc struct it is embedded in. By directly embedding the fields from the pl330_req struct into the dma_pl330_desc struct and reworking the code to operate on the dma_pl330_desc struct, those pointers can be eliminated. This slightly simplifies the code.
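Schematically, abbreviated to the fields involved (the full change is in the diff below):

/* Before: the descriptor carries a pl330_req whose pointer members
 * just point back into the descriptor itself. */
struct dma_pl330_desc {
	struct pl330_xfer px;
	struct pl330_reqcfg rqcfg;
	struct pl330_req req;	/* req.x = &px, req.cfg = &rqcfg */
};

/* After: the useful pl330_req members live directly in the descriptor. */
struct dma_pl330_desc {
	struct pl330_xfer px;
	struct pl330_reqcfg rqcfg;
	enum dma_transfer_direction rqtype;
	unsigned peri:5;
	struct list_head rqd;
};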
Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 159 +++++++++++++++++++++++----------------------------- 1 file changed, 69 insertions(+), 90 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index e5fe9c764f53..b31c6c380158 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -332,19 +332,6 @@ enum pl330_op_err { PL330_ERR_FAIL, }; -/* A request defining Scatter-Gather List ending with NULL xfer. */ -struct pl330_req { - enum dma_transfer_direction rqtype; - /* Index of peripheral for the xfer. */ - unsigned peri:5; - /* If NULL, req will be done at last set parameters. */ - struct pl330_reqcfg *cfg; - /* Pointer to first xfer in the request. */ - struct pl330_xfer *x; - /* Hook to attach to DMAC's list of reqs with due callback */ - struct list_head rqd; -}; - enum pl330_chan_op { /* Start the channel */ PL330_OP_START, @@ -354,12 +341,6 @@ enum pl330_chan_op { PL330_OP_FLUSH, }; -struct _xfer_spec { - u32 ccr; - struct pl330_req *r; - struct pl330_xfer *x; -}; - enum dmamov_dst { SAR = 0, CCR, @@ -377,10 +358,12 @@ enum pl330_cond { ALWAYS, }; +struct dma_pl330_desc; + struct _pl330_req { u32 mc_bus; void *mc_cpu; - struct pl330_req *r; + struct dma_pl330_desc *desc; }; /* ToBeDone for tasklet */ @@ -526,12 +509,22 @@ struct dma_pl330_desc { struct pl330_xfer px; struct pl330_reqcfg rqcfg; - struct pl330_req req; enum desc_status status; /* The channel which currently holds this desc */ struct dma_pl330_chan *pchan; + + enum dma_transfer_direction rqtype; + /* Index of peripheral for the xfer. */ + unsigned peri:5; + /* Hook to attach to DMAC's list of reqs with due callback */ + struct list_head rqd; +}; + +struct _xfer_spec { + u32 ccr; + struct dma_pl330_desc *desc; }; static inline bool _queue_empty(struct pl330_thread *thrd) @@ -1055,7 +1048,7 @@ static bool _trigger(struct pl330_thread *thrd) { void __iomem *regs = thrd->dmac->base; struct _pl330_req *req; - struct pl330_req *r; + struct dma_pl330_desc *desc; struct _arg_GO go; unsigned ns; u8 insn[6] = {0, 0, 0, 0, 0, 0}; @@ -1077,17 +1070,12 @@ static bool _trigger(struct pl330_thread *thrd) } /* Return if no request */ - if (!req || !req->r) + if (!req || !req->desc) return true; - r = req->r; + desc = req->desc; - if (r->cfg) - ns = r->cfg->nonsecure ? 1 : 0; - else if (readl(regs + CS(thrd->id)) & CS_CNS) - ns = 1; - else - ns = 0; + ns = desc->rqcfg.nonsecure ? 
1 : 0; /* See 'Abort Sources' point-4 at Page 2-25 */ if (_manager_ns(thrd) && !ns) @@ -1147,7 +1135,7 @@ static inline int _ldst_memtomem(unsigned dry_run, u8 buf[], const struct _xfer_spec *pxs, int cyc) { int off = 0; - struct pl330_config *pcfg = pxs->r->cfg->pcfg; + struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg; /* check lock-up free version */ if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) { @@ -1173,10 +1161,10 @@ static inline int _ldst_devtomem(unsigned dry_run, u8 buf[], int off = 0; while (cyc--) { - off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); - off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->r->peri); + off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); + off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri); off += _emit_ST(dry_run, &buf[off], ALWAYS); - off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); + off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); } return off; @@ -1188,10 +1176,10 @@ static inline int _ldst_memtodev(unsigned dry_run, u8 buf[], int off = 0; while (cyc--) { - off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->r->peri); + off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); off += _emit_LD(dry_run, &buf[off], ALWAYS); - off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->r->peri); - off += _emit_FLUSHP(dry_run, &buf[off], pxs->r->peri); + off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri); + off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); } return off; @@ -1202,7 +1190,7 @@ static int _bursts(unsigned dry_run, u8 buf[], { int off = 0; - switch (pxs->r->rqtype) { + switch (pxs->desc->rqtype) { case DMA_MEM_TO_DEV: off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc); break; @@ -1302,7 +1290,7 @@ static inline int _loop(unsigned dry_run, u8 buf[], static inline int _setup_loops(unsigned dry_run, u8 buf[], const struct _xfer_spec *pxs) { - struct pl330_xfer *x = pxs->x; + struct pl330_xfer *x = &pxs->desc->px; u32 ccr = pxs->ccr; unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr); int off = 0; @@ -1319,7 +1307,7 @@ static inline int _setup_loops(unsigned dry_run, u8 buf[], static inline int _setup_xfer(unsigned dry_run, u8 buf[], const struct _xfer_spec *pxs) { - struct pl330_xfer *x = pxs->x; + struct pl330_xfer *x = &pxs->desc->px; int off = 0; /* DMAMOV SAR, x->src_addr */ @@ -1350,12 +1338,11 @@ static int _setup_req(unsigned dry_run, struct pl330_thread *thrd, /* DMAMOV CCR, ccr */ off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr); - x = pxs->r->x; + x = &pxs->desc->px; /* Error if xfer length is not aligned at burst size */ if (x->bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) return -EINVAL; - pxs->x = x; off += _setup_xfer(dry_run, &buf[off], pxs); /* DMASEV peripheral/event */ @@ -1403,7 +1390,8 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) * Client is not notified after each xfer unit, just once after all * xfer units are done or some error occurs. 
*/ -static int pl330_submit_req(struct pl330_thread *thrd, struct pl330_req *r) +static int pl330_submit_req(struct pl330_thread *thrd, + struct dma_pl330_desc *desc) { struct pl330_dmac *pl330 = thrd->dmac; struct _xfer_spec xs; @@ -1414,7 +1402,7 @@ static int pl330_submit_req(struct pl330_thread *thrd, struct pl330_req *r) int ret = 0; /* No Req or Unacquired Channel or DMAC */ - if (!r || !thrd || thrd->free) + if (!desc || !thrd || thrd->free) return -EINVAL; regs = thrd->dmac->base; @@ -1427,10 +1415,11 @@ static int pl330_submit_req(struct pl330_thread *thrd, struct pl330_req *r) } /* If request for non-existing peripheral */ - if (r->rqtype != DMA_MEM_TO_MEM && r->peri >= pl330->pcfg.num_peri) { + if (desc->rqtype != DMA_MEM_TO_MEM && + desc->peri >= pl330->pcfg.num_peri) { dev_info(thrd->dmac->ddma.dev, "%s:%d Invalid peripheral(%u)!\n", - __func__, __LINE__, r->peri); + __func__, __LINE__, desc->peri); return -EINVAL; } @@ -1441,24 +1430,18 @@ static int pl330_submit_req(struct pl330_thread *thrd, struct pl330_req *r) goto xfer_exit; } + /* Prefer Secure Channel */ + if (!_manager_ns(thrd)) + desc->rqcfg.nonsecure = 0; + else + desc->rqcfg.nonsecure = 1; - /* Use last settings, if not provided */ - if (r->cfg) { - /* Prefer Secure Channel */ - if (!_manager_ns(thrd)) - r->cfg->nonsecure = 0; - else - r->cfg->nonsecure = 1; - - ccr = _prepare_ccr(r->cfg); - } else { - ccr = readl(regs + CC(thrd->id)); - } + ccr = _prepare_ccr(&desc->rqcfg); idx = IS_FREE(&thrd->req[0]) ? 0 : 1; xs.ccr = ccr; - xs.r = r; + xs.desc = desc; /* First dry run to check if req is acceptable */ ret = _setup_req(1, thrd, idx, &xs); @@ -1474,7 +1457,7 @@ static int pl330_submit_req(struct pl330_thread *thrd, struct pl330_req *r) /* Hook the request */ thrd->lstenq = idx; - thrd->req[idx].r = r; + thrd->req[idx].desc = desc; _setup_req(0, thrd, idx, &xs); ret = 0; @@ -1485,9 +1468,8 @@ xfer_exit: return ret; } -static void dma_pl330_rqcb(struct pl330_req *req, enum pl330_op_err err) +static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err) { - struct dma_pl330_desc *desc = container_of(req, struct dma_pl330_desc, req); struct dma_pl330_chan *pch = desc->pchan; unsigned long flags; @@ -1544,12 +1526,12 @@ static void pl330_dotask(unsigned long data) err = PL330_ERR_ABORT; spin_unlock_irqrestore(&pl330->lock, flags); - dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].r, err); - dma_pl330_rqcb(thrd->req[thrd->lstenq].r, err); + dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err); + dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err); spin_lock_irqsave(&pl330->lock, flags); - thrd->req[0].r = NULL; - thrd->req[1].r = NULL; + thrd->req[0].desc = NULL; + thrd->req[1].desc = NULL; mark_free(thrd, 0); mark_free(thrd, 1); @@ -1566,7 +1548,7 @@ static void pl330_dotask(unsigned long data) /* Returns 1 if state was updated, 0 otherwise */ static int pl330_update(struct pl330_dmac *pl330) { - struct pl330_req *rqdone, *tmp; + struct dma_pl330_desc *descdone, *tmp; unsigned long flags; void __iomem *regs; u32 val; @@ -1630,8 +1612,8 @@ static int pl330_update(struct pl330_dmac *pl330) continue; /* Detach the req */ - rqdone = thrd->req[active].r; - thrd->req[active].r = NULL; + descdone = thrd->req[active].desc; + thrd->req[active].desc = NULL; mark_free(thrd, active); @@ -1639,16 +1621,15 @@ static int pl330_update(struct pl330_dmac *pl330) _start(thrd); /* For now, just make a list of callbacks to be done */ - list_add_tail(&rqdone->rqd, &pl330->req_done); + list_add_tail(&descdone->rqd, 
&pl330->req_done); } } /* Now that we are in no hurry, do the callbacks */ - list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) { - list_del(&rqdone->rqd); - + list_for_each_entry_safe(descdone, tmp, &pl330->req_done, rqd) { + list_del(&descdone->rqd); spin_unlock_irqrestore(&pl330->lock, flags); - dma_pl330_rqcb(rqdone, PL330_ERR_NONE); + dma_pl330_rqcb(descdone, PL330_ERR_NONE); spin_lock_irqsave(&pl330->lock, flags); } @@ -1684,8 +1665,8 @@ static int pl330_chan_ctrl(struct pl330_thread *thrd, enum pl330_chan_op op) /* Make sure the channel is stopped */ _stop(thrd); - thrd->req[0].r = NULL; - thrd->req[1].r = NULL; + thrd->req[0].desc = NULL; + thrd->req[1].desc = NULL; mark_free(thrd, 0); mark_free(thrd, 1); break; @@ -1698,7 +1679,7 @@ static int pl330_chan_ctrl(struct pl330_thread *thrd, enum pl330_chan_op op) if (active == -1) break; - thrd->req[active].r = NULL; + thrd->req[active].desc = NULL; mark_free(thrd, active); /* Start the next */ @@ -1759,9 +1740,9 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) if (thrd->ev >= 0) { thrd->free = false; thrd->lstenq = 1; - thrd->req[0].r = NULL; + thrd->req[0].desc = NULL; mark_free(thrd, 0); - thrd->req[1].r = NULL; + thrd->req[1].desc = NULL; mark_free(thrd, 1); break; } @@ -1795,8 +1776,8 @@ static void pl330_release_channel(struct pl330_thread *thrd) _stop(thrd); - dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].r, PL330_ERR_ABORT); - dma_pl330_rqcb(thrd->req[thrd->lstenq].r, PL330_ERR_ABORT); + dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT); + dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT); pl330 = thrd->dmac; @@ -1859,14 +1840,14 @@ static inline void _reset_thread(struct pl330_thread *thrd) + (thrd->id * pl330->mcbufsz); thrd->req[0].mc_bus = pl330->mcode_bus + (thrd->id * pl330->mcbufsz); - thrd->req[0].r = NULL; + thrd->req[0].desc = NULL; mark_free(thrd, 0); thrd->req[1].mc_cpu = thrd->req[0].mc_cpu + pl330->mcbufsz / 2; thrd->req[1].mc_bus = thrd->req[0].mc_bus + pl330->mcbufsz / 2; - thrd->req[1].r = NULL; + thrd->req[1].desc = NULL; mark_free(thrd, 1); } @@ -2041,7 +2022,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch) if (desc->status == BUSY) continue; - ret = pl330_submit_req(pch->thread, &desc->req); + ret = pl330_submit_req(pch->thread, desc); if (!ret) { desc->status = BUSY; } else if (ret == -EAGAIN) { @@ -2301,11 +2282,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) static inline void _init_desc(struct dma_pl330_desc *desc) { - desc->req.x = &desc->px; desc->rqcfg.swap = SWAP_NO; desc->rqcfg.scctl = CCTRL0; desc->rqcfg.dcctl = CCTRL0; - desc->req.cfg = &desc->rqcfg; desc->txd.tx_submit = pl330_tx_submit; INIT_LIST_HEAD(&desc->node); @@ -2384,7 +2363,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) desc->txd.cookie = 0; async_tx_ack(&desc->txd); - desc->req.peri = peri_id ? pch->chan.chan_id : 0; + desc->peri = peri_id ? 
pch->chan.chan_id : 0; desc->rqcfg.pcfg = &pch->dmac->pcfg; dma_async_tx_descriptor_init(&desc->txd, &pch->chan); @@ -2513,7 +2492,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( break; } - desc->req.rqtype = direction; + desc->rqtype = direction; desc->rqcfg.brst_size = pch->burst_sz; desc->rqcfg.brst_len = 1; fill_px(&desc->px, dst, src, period_len); @@ -2553,7 +2532,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, desc->rqcfg.src_inc = 1; desc->rqcfg.dst_inc = 1; - desc->req.rqtype = DMA_MEM_TO_MEM; + desc->rqtype = DMA_MEM_TO_MEM; /* Select max possible burst size */ burst = pl330->pcfg.data_bus_width / 8; @@ -2648,7 +2627,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc->rqcfg.brst_size = pch->burst_sz; desc->rqcfg.brst_len = 1; - desc->req.rqtype = direction; + desc->rqtype = direction; } /* Return the last desc in the chain */ -- cgit v1.2.3-58-ga151 From 8ed30a14265fc2ebace02ea321c463facedfac17 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:31 +0200 Subject: dmaengine: pl330: Simplify marking a request as unused Instead of storing a special instruction in the command buffer to mark a request as currently unused just set the descriptor field to NULL. Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 51 +++++++++++++-------------------------------------- 1 file changed, 13 insertions(+), 38 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index b31c6c380158..105e33e3bb33 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -245,9 +245,6 @@ enum pl330_byteswap { */ #define MCODE_BUFF_PER_REQ 256 -/* If the _pl330_req is available to the client */ -#define IS_FREE(req) (*((u8 *)((req)->mc_cpu)) == CMD_DMAEND) - /* Use this _only_ to wait on transient states */ #define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax(); @@ -529,14 +526,12 @@ struct _xfer_spec { static inline bool _queue_empty(struct pl330_thread *thrd) { - return (IS_FREE(&thrd->req[0]) && IS_FREE(&thrd->req[1])) - ? true : false; + return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL; } static inline bool _queue_full(struct pl330_thread *thrd) { - return (IS_FREE(&thrd->req[0]) || IS_FREE(&thrd->req[1])) - ? false : true; + return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL; } static inline bool is_manager(struct pl330_thread *thrd) @@ -948,21 +943,6 @@ static inline void _execute_DBGINSN(struct pl330_thread *thrd, writel(0, regs + DBGCMD); } -/* - * Mark a _pl330_req as free. - * We do it by writing DMAEND as the first instruction - * because no valid request is going to have DMAEND as - * its first instruction to execute. 
- */ -static void mark_free(struct pl330_thread *thrd, int idx) -{ - struct _pl330_req *req = &thrd->req[idx]; - - _emit_END(0, req->mc_cpu); - - thrd->req_running = -1; -} - static inline u32 _state(struct pl330_thread *thrd) { void __iomem *regs = thrd->dmac->base; @@ -1059,18 +1039,18 @@ static bool _trigger(struct pl330_thread *thrd) return true; idx = 1 - thrd->lstenq; - if (!IS_FREE(&thrd->req[idx])) + if (thrd->req[idx].desc != NULL) { req = &thrd->req[idx]; - else { + } else { idx = thrd->lstenq; - if (!IS_FREE(&thrd->req[idx])) + if (thrd->req[idx].desc != NULL) req = &thrd->req[idx]; else req = NULL; } /* Return if no request */ - if (!req || !req->desc) + if (!req) return true; desc = req->desc; @@ -1438,7 +1418,7 @@ static int pl330_submit_req(struct pl330_thread *thrd, ccr = _prepare_ccr(&desc->rqcfg); - idx = IS_FREE(&thrd->req[0]) ? 0 : 1; + idx = thrd->req[0].desc == NULL ? 0 : 1; xs.ccr = ccr; xs.desc = desc; @@ -1532,8 +1512,7 @@ static void pl330_dotask(unsigned long data) thrd->req[0].desc = NULL; thrd->req[1].desc = NULL; - mark_free(thrd, 0); - mark_free(thrd, 1); + thrd->req_running = -1; /* Clear the reset flag */ pl330->dmac_tbd.reset_chan &= ~(1 << i); @@ -1615,8 +1594,6 @@ static int pl330_update(struct pl330_dmac *pl330) descdone = thrd->req[active].desc; thrd->req[active].desc = NULL; - mark_free(thrd, active); - /* Get going again ASAP */ _start(thrd); @@ -1667,8 +1644,7 @@ static int pl330_chan_ctrl(struct pl330_thread *thrd, enum pl330_chan_op op) thrd->req[0].desc = NULL; thrd->req[1].desc = NULL; - mark_free(thrd, 0); - mark_free(thrd, 1); + thrd->req_running = -1; break; case PL330_OP_ABORT: @@ -1680,7 +1656,7 @@ static int pl330_chan_ctrl(struct pl330_thread *thrd, enum pl330_chan_op op) break; thrd->req[active].desc = NULL; - mark_free(thrd, active); + thrd->req_running = -1; /* Start the next */ case PL330_OP_START: @@ -1741,9 +1717,8 @@ static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) thrd->free = false; thrd->lstenq = 1; thrd->req[0].desc = NULL; - mark_free(thrd, 0); thrd->req[1].desc = NULL; - mark_free(thrd, 1); + thrd->req_running = -1; break; } } @@ -1841,14 +1816,14 @@ static inline void _reset_thread(struct pl330_thread *thrd) thrd->req[0].mc_bus = pl330->mcode_bus + (thrd->id * pl330->mcbufsz); thrd->req[0].desc = NULL; - mark_free(thrd, 0); thrd->req[1].mc_cpu = thrd->req[0].mc_cpu + pl330->mcbufsz / 2; thrd->req[1].mc_bus = thrd->req[0].mc_bus + pl330->mcbufsz / 2; thrd->req[1].desc = NULL; - mark_free(thrd, 1); + + thrd->req_running = -1; } static int dmac_alloc_threads(struct pl330_dmac *pl330) -- cgit v1.2.3-58-ga151 From c26939e5204c533b9348cdd0b4155758f9276df1 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 6 Jul 2014 20:32:32 +0200 Subject: dmaengine: pl330: Remove pl330_chan_ctrl() The pl330_chan_ctrl() function has 3 internal code paths which, except for the locking, do not share any code outside of their sections. One code path is never exercised and can be removed. The other two are mostly just forwards to the _start() and _stop() calls. This patch modifies the code to call _start() and _stop() directly instead of going via pl330_chan_ctrl(), which allows pl330_chan_ctrl() to be removed completely. 
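Condensed from the diff that follows, the shape of the change at a typical call site: the caller now takes the DMAC lock itself and drives the thread primitive directly.

	/* before: one opaque entry point */
	pl330_chan_ctrl(pch->thread, PL330_OP_START);

	/* after: lock the DMAC and call the primitive */
	spin_lock(&pch->thread->dmac->lock);
	_start(pch->thread);
	spin_unlock(&pch->thread->dmac->lock);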
Signed-off-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 71 ++++++++--------------------------------------------- 1 file changed, 10 insertions(+), 61 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 105e33e3bb33..bc5878a5c09e 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -329,15 +329,6 @@ enum pl330_op_err { PL330_ERR_FAIL, }; -enum pl330_chan_op { - /* Start the channel */ - PL330_OP_START, - /* Abort the active xfer */ - PL330_OP_ABORT, - /* Stop xfer and flush queue */ - PL330_OP_FLUSH, -}; - enum dmamov_dst { SAR = 0, CCR, @@ -1623,55 +1614,6 @@ updt_exit: return ret; } -static int pl330_chan_ctrl(struct pl330_thread *thrd, enum pl330_chan_op op) -{ - struct pl330_dmac *pl330; - unsigned long flags; - int ret = 0, active; - - if (!thrd || thrd->free || thrd->dmac->state == DYING) - return -EINVAL; - - pl330 = thrd->dmac; - active = thrd->req_running; - - spin_lock_irqsave(&pl330->lock, flags); - - switch (op) { - case PL330_OP_FLUSH: - /* Make sure the channel is stopped */ - _stop(thrd); - - thrd->req[0].desc = NULL; - thrd->req[1].desc = NULL; - thrd->req_running = -1; - break; - - case PL330_OP_ABORT: - /* Make sure the channel is stopped */ - _stop(thrd); - - /* ABORT is only for the active req */ - if (active == -1) - break; - - thrd->req[active].desc = NULL; - thrd->req_running = -1; - - /* Start the next */ - case PL330_OP_START: - if ((active == -1) && !_start(thrd)) - ret = -EIO; - break; - - default: - ret = -EINVAL; - } - - spin_unlock_irqrestore(&pl330->lock, flags); - return ret; -} - /* Reserve an event */ static inline int _alloc_event(struct pl330_thread *thrd) { @@ -2033,7 +1975,9 @@ static void pl330_tasklet(unsigned long data) fill_queue(pch); /* Make sure the PL330 Channel thread is active */ - pl330_chan_ctrl(pch->thread, PL330_OP_START); + spin_lock(&pch->thread->dmac->lock); + _start(pch->thread); + spin_unlock(&pch->thread->dmac->lock); while (!list_empty(&pch->completed_list)) { dma_async_tx_callback callback; @@ -2133,8 +2077,13 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned case DMA_TERMINATE_ALL: spin_lock_irqsave(&pch->lock, flags); - /* FLUSH the PL330 Channel thread */ - pl330_chan_ctrl(pch->thread, PL330_OP_FLUSH); + spin_lock(&pl330->lock); + _stop(pch->thread); + spin_unlock(&pl330->lock); + + pch->thread->req[0].desc = NULL; + pch->thread->req[1].desc = NULL; + pch->thread->req_running = -1; /* Mark all desc done */ list_for_each_entry(desc, &pch->submitted_list, node) { -- cgit v1.2.3-58-ga151 From e7637c6c0382485f4d2e20715d058dae6f2b6a7c Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 18 Jun 2014 12:15:36 +0300 Subject: dmaengine: dw: introduce dwc_dostart_first_queued() helper We have duplicate code which starts the first descriptor in the queue. Let's factor it out into a separate helper that can be used in the future as well. 
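The helper, quoted from the diff that follows; both duplicated call sites collapse into a single call to it:

	static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
	{
		if (list_empty(&dwc->queue))
			return;

		/* Promote the first queued descriptor and start it */
		list_move(dwc->queue.next, &dwc->active_list);
		dwc_dostart(dwc, dwc_first_active(dwc));
	}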
Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index a27ded53ab4f..5ddf5e4c1222 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -279,6 +279,15 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) channel_set_bit(dw, CH_EN, dwc->mask); } +static void dwc_dostart_first_queued(struct dw_dma_chan *dwc) +{ + if (list_empty(&dwc->queue)) + return; + + list_move(dwc->queue.next, &dwc->active_list); + dwc_dostart(dwc, dwc_first_active(dwc)); +} + /*----------------------------------------------------------------------*/ static void @@ -335,10 +344,7 @@ static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc) * the completed ones. */ list_splice_init(&dwc->active_list, &list); - if (!list_empty(&dwc->queue)) { - list_move(dwc->queue.next, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); - } + dwc_dostart_first_queued(dwc); spin_unlock_irqrestore(&dwc->lock, flags); @@ -467,10 +473,7 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) /* Try to continue after resetting the channel... */ dwc_chan_disable(dw, dwc); - if (!list_empty(&dwc->queue)) { - list_move(dwc->queue.next, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); - } + dwc_dostart_first_queued(dwc); spin_unlock_irqrestore(&dwc->lock, flags); } -- cgit v1.2.3-58-ga151 From cba15617c37cc6082c286c2a272202565f62e7bc Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 18 Jun 2014 12:15:37 +0300 Subject: dmaengine: dw: add debug message to dwc_dostart_first_queued It would be useful to know when the first descriptor in the queue is started along with its cookie. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/dw/core.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 5ddf5e4c1222..52815b905b7c 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -281,11 +281,15 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) static void dwc_dostart_first_queued(struct dw_dma_chan *dwc) { + struct dw_desc *desc; + if (list_empty(&dwc->queue)) return; list_move(dwc->queue.next, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); + desc = dwc_first_active(dwc); + dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie); + dwc_dostart(dwc, desc); } /*----------------------------------------------------------------------*/ -- cgit v1.2.3-58-ga151 From dd8ecfcac66b4485416b2d1df0ec4798b198d7d6 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 18 Jun 2014 12:15:38 +0300 Subject: dmaengine: dw: don't perform DMA when dmaengine_submit is called According to the discussion [1] and the follow-up documentation, the DMA controller driver shouldn't start any DMA operations when dmaengine_submit() is called. This patch fixes the workflow in the dw_dmac driver to follow the documentation. 
[1] http://www.spinics.net/lists/arm-kernel/msg125987.html Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul --- drivers/dma/TODO | 1 - drivers/dma/dw/core.c | 19 +++++++------------ 2 files changed, 7 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/TODO b/drivers/dma/TODO index 734ed0206cd5..b8045cd42ee1 100644 --- a/drivers/dma/TODO +++ b/drivers/dma/TODO @@ -7,7 +7,6 @@ TODO for slave dma - imx-dma - imx-sdma - mxs-dma.c - - dw_dmac - intel_mid_dma 4. Check other subsystems for dma drivers and merge/move to dmaengine 5. Remove dma_slave_config's dma direction. diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 52815b905b7c..1af731b83b3f 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -684,17 +684,9 @@ static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx) * possible, perhaps even appending to those already submitted * for DMA. But this is hard to do in a race-free manner. */ - if (list_empty(&dwc->active_list)) { - dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, - desc->txd.cookie); - list_add_tail(&desc->desc_node, &dwc->active_list); - dwc_dostart(dwc, dwc_first_active(dwc)); - } else { - dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, - desc->txd.cookie); - list_add_tail(&desc->desc_node, &dwc->queue); - } + dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie); + list_add_tail(&desc->desc_node, &dwc->queue); spin_unlock_irqrestore(&dwc->lock, flags); @@ -1099,9 +1091,12 @@ dwc_tx_status(struct dma_chan *chan, static void dwc_issue_pending(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + unsigned long flags; - if (!list_empty(&dwc->queue)) - dwc_scan_descriptors(to_dw_dma(chan->device), dwc); + spin_lock_irqsave(&dwc->lock, flags); + if (list_empty(&dwc->active_list)) + dwc_dostart_first_queued(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); } static int dwc_alloc_chan_resources(struct dma_chan *chan) -- cgit v1.2.3-58-ga151 From b1e51d771fbc9fec15785760a2f725040a0fe671 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Sat, 19 Jul 2014 03:21:47 +0200 Subject: dmaengine: pl330: Check if the DMA descriptor is NULL Commit 6079d38 ("dmaengine: pl330: Remove useless xfer_cb indirection") removed the __callback() function which created an unnecessary level of indirection to execute the transfer callback .xfer_cb. Unfortunately the commit also changed the semantics slightly since that function used to check if the request was not NULL before attempting to execute the callback function. Not checking this could lead to a kernel NULL pointer dereference error. 
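The fix reduces to an early-return guard at the top of the callback, condensed from the diff that follows:

	static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
	{
		struct dma_pl330_chan *pch;

		/* Restore the NULL check the removed __callback() used to perform */
		if (!desc)
			return;

		pch = desc->pchan;

		/* If desc aborted */
		if (!pch)
			return;
		...
	}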
Signed-off-by: Javier Martinez Canillas Acked-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/pl330.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index bc5878a5c09e..a55d75498098 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1441,9 +1441,14 @@ xfer_exit: static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err) { - struct dma_pl330_chan *pch = desc->pchan; + struct dma_pl330_chan *pch; unsigned long flags; + if (!desc) + return; + + pch = desc->pchan; + /* If desc aborted */ if (!pch) return; -- cgit v1.2.3-58-ga151 From 0b3518652c596b00c1592b5adb2544db75b5ef57 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Mon, 16 Jun 2014 11:32:29 +0800 Subject: dmaengine: imx-sdma: Save imx_dma_data into sdmac The filter() function is currently called by xlate() while it transfers imx_dma_data as a local variable to the filter() but releases the data right after returning a DMA channel pointer, which results in chan->private pointing to an invalid memory space. So this patch just stores the imx_dma_data into sdmac to make sure the private pointer stays valid as long as the channel exists. Signed-off-by: Nicolin Chen Signed-off-by: Vinod Koul --- drivers/dma/imx-sdma.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index bbcf783707f1..8269c200b53b 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -271,6 +271,7 @@ struct sdma_channel { unsigned int chn_count; unsigned int chn_real_count; struct tasklet_struct tasklet; + struct imx_dma_data data; }; #define IMX_DMA_SG_LOOP BIT(0) @@ -1413,12 +1414,14 @@ err_dma_alloc: static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param) { + struct sdma_channel *sdmac = to_sdma_chan(chan); struct imx_dma_data *data = fn_param; if (!imx_dma_is_general_purpose(chan)) return false; - chan->private = data; + sdmac->data = *data; + chan->private = &sdmac->data; return true; } -- cgit v1.2.3-58-ga151 From f892afb07eeecf575179c4747952644a82a92a36 Mon Sep 17 00:00:00 2001 From: Nicolin Chen Date: Mon, 16 Jun 2014 11:31:05 +0800 Subject: dmaengine: imx-sdma: Add a new DMATYPE for Shared Peripheral ASRC Shared Peripheral ASRC, running on SPBA, needs to use shp scripts for DMA transfer. So this patch just adds a new DMATYPE for it. Signed-off-by: Nicolin Chen Acked-by: Shawn Guo Signed-off-by: Vinod Koul --- Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt | 1 + drivers/dma/imx-sdma.c | 5 +++++ include/linux/platform_data/dma-imx.h | 1 + 3 files changed, 7 insertions(+) (limited to 'drivers') diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt index e577196a12c0..4659fd952301 100644 --- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt +++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt @@ -47,6 +47,7 @@ The full ID of peripheral types can be found below. 20 ASRC 21 ESAI 22 SSI Dual FIFO (needs firmware ver >= 2) + 23 Shared ASRC The third cell specifies the transfer priority as below. 
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 8269c200b53b..de584e605db5 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -750,6 +750,11 @@ static void sdma_get_pc(struct sdma_channel *sdmac, emi_2_per = sdma->script_addrs->asrc_2_mcu_addr; per_2_per = sdma->script_addrs->per_2_per_addr; break; + case IMX_DMATYPE_ASRC_SP: + per_2_emi = sdma->script_addrs->shp_2_mcu_addr; + emi_2_per = sdma->script_addrs->mcu_2_shp_addr; + per_2_per = sdma->script_addrs->per_2_per_addr; + break; case IMX_DMATYPE_MSHC: per_2_emi = sdma->script_addrs->mshc_2_mcu_addr; emi_2_per = sdma->script_addrs->mcu_2_mshc_addr; diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index bcbc6c3c14c0..7aa0e89d1bcc 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -40,6 +40,7 @@ enum sdma_peripheral_type { IMX_DMATYPE_ASRC, /* ASRC */ IMX_DMATYPE_ESAI, /* ESAI */ IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ + IMX_DMATYPE_ASRC_SP, /* Shared ASRC */ }; enum imx_dma_prio { -- cgit v1.2.3-58-ga151 From 211bfef7c2bc8c67e3878e54747e6bb2be3456da Mon Sep 17 00:00:00 2001 From: Jingchang Lu Date: Tue, 1 Jul 2014 16:41:03 +0800 Subject: dmaengine: fsl-edma: fix dmamux index calculating error Signed-off-by: Jingchang Lu Tested-by: Stefan Agner Signed-off-by: Vinod Koul --- drivers/dma/fsl-edma.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index b396a7fb53ab..24ab3d371954 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -248,11 +248,12 @@ static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, unsigned int slot, bool enable) { u32 ch = fsl_chan->vchan.chan.chan_id; - void __iomem *muxaddr = fsl_chan->edma->muxbase[ch / DMAMUX_NR]; + void __iomem *muxaddr; unsigned chans_per_mux, ch_off; chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; + muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; if (enable) edma_writeb(fsl_chan->edma, @@ -724,6 +725,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, { struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data; struct dma_chan *chan, *_chan; + unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR; if (dma_spec->args_count != 2) return NULL; @@ -732,7 +734,7 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) { if (chan->client_count) continue; - if ((chan->chan_id / DMAMUX_NR) == dma_spec->args[0]) { + if ((chan->chan_id / chans_per_mux) == dma_spec->args[0]) { chan = dma_get_slave_channel(chan); if (chan) { chan->device->privatecnt++; -- cgit v1.2.3-58-ga151 From e2f9922ad4526e747f9727a5481979727db56ec6 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 15 Jul 2014 17:24:38 -0600 Subject: dmaengine: jz4740: Fix GPL v2 license string typo Per license_is_gpl_compatible(), the MODULE_LICENSE() string for GPL v2 is "GPL v2", not "GPLv2". Use "GPL v2" so this module doesn't taint the kernel. 
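The whole fix is a single string; only the spellings that license_is_gpl_compatible() knows about avoid tainting the kernel:

	MODULE_LICENSE("GPL v2");	/* recognized as GPL-compatible */

	MODULE_LICENSE("GPLv2");	/* not recognized -- taints the kernel */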
Signed-off-by: Bjorn Helgaas Acked-by: Lars-Peter Clausen Signed-off-by: Vinod Koul --- drivers/dma/dma-jz4740.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index 94c380f07538..bfbce6b07902 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -614,4 +614,4 @@ module_platform_driver(jz4740_dma_driver); MODULE_AUTHOR("Lars-Peter Clausen "); MODULE_DESCRIPTION("JZ4740 DMA driver"); -MODULE_LICENSE("GPLv2"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-58-ga151 From 37a746aaf5805c4bf3ab8f2d6e4cc78c879fd697 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Fri, 25 Jul 2014 15:39:50 +0530 Subject: dmaengine: ipu: use return value of request_irq Commit 653e67f7e5 ("dmaengine: inherit debug settings from the subsystem for subdirectories") introduced the debug option for subdirectories too. This exposed an issue with the ipu driver not using the return value of request_irq(). For now, just warn users about it. Reported-by: kbuild test robot Signed-off-by: Vinod Koul --- drivers/dma/ipu/ipu_idmac.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 128ca143486d..bbf62927bd72 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1532,11 +1532,17 @@ static int idmac_alloc_chan_resources(struct dma_chan *chan) #ifdef DEBUG if (chan->chan_id == IDMAC_IC_7) { ic_sof = ipu_irq_map(69); - if (ic_sof > 0) - request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan); + if (ic_sof > 0) { + ret = request_irq(ic_sof, ic_sof_irq, 0, "IC SOF", ichan); + if (ret) + dev_err(&chan->dev->device, "request irq failed for IC SOF"); + } ic_eof = ipu_irq_map(70); - if (ic_eof > 0) - request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan); + if (ic_eof > 0) { + ret = request_irq(ic_eof, ic_eof_irq, 0, "IC EOF", ichan); + if (ret) + dev_err(&chan->dev->device, "request irq failed for IC EOF"); + } } #endif -- cgit v1.2.3-58-ga151 From 555859308723d8d5b828304f5eb9281143fd86b5 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 17 Jul 2014 21:46:16 +0200 Subject: dmaengine: sun6i: Add driver for the Allwinner A31 DMA controller The Allwinner A31 has a 16-channel DMA controller that it shares with the newer A23. Although it shares some similarities with the DMA controller of the older Allwinner SoCs, it is significantly different; I don't expect it to be possible to share a driver between the two. The A31 controller is able to perform memory-to-memory or memory-to-device transfers on the 16 channels in parallel. Signed-off-by: Maxime Ripard Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 8 + drivers/dma/Makefile | 1 + drivers/dma/sun6i-dma.c | 1059 +++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1068 insertions(+) create mode 100644 drivers/dma/sun6i-dma.c (limited to 'drivers') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 1eca7b9760e6..4b439270fb11 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -375,6 +375,14 @@ config XILINX_VDMA channels, Memory Mapped to Stream (MM2S) and Stream to Memory Mapped (S2MM) for the data transfers. +config DMA_SUN6I + tristate "Allwinner A31 SoCs DMA support" + depends on MACH_SUN6I || COMPILE_TEST + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support for the DMA engine for Allwinner A31 SoCs. 
+ config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index aca5eb577d44..d08bd966da13 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -47,3 +47,4 @@ obj-$(CONFIG_MOXART_DMA) += moxart-dma.o obj-$(CONFIG_FSL_EDMA) += fsl-edma.o obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o obj-y += xilinx/ +obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c new file mode 100644 index 000000000000..ce8d5d1b0ff4 --- /dev/null +++ b/drivers/dma/sun6i-dma.c @@ -0,0 +1,1059 @@ +/* + * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd + * Author: Sugar + * + * Copyright (C) 2014 Maxime Ripard + * Maxime Ripard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "virt-dma.h" + +/* + * There's 16 physical channels that can work in parallel. + * + * However we have 30 different endpoints for our requests. + * + * Since the channels are able to handle only an unidirectional + * transfer, we need to allocate more virtual channels so that + * everyone can grab one channel. + * + * Some devices can't work in both direction (mostly because it + * wouldn't make sense), so we have a bit fewer virtual channels than + * 2 channels per endpoints. + */ + +#define NR_MAX_CHANNELS 16 +#define NR_MAX_REQUESTS 30 +#define NR_MAX_VCHANS 53 + +/* + * Common registers + */ +#define DMA_IRQ_EN(x) ((x) * 0x04) +#define DMA_IRQ_HALF BIT(0) +#define DMA_IRQ_PKG BIT(1) +#define DMA_IRQ_QUEUE BIT(2) + +#define DMA_IRQ_CHAN_NR 8 +#define DMA_IRQ_CHAN_WIDTH 4 + + +#define DMA_IRQ_STAT(x) ((x) * 0x04 + 0x10) + +#define DMA_STAT 0x30 + +/* + * Channels specific registers + */ +#define DMA_CHAN_ENABLE 0x00 +#define DMA_CHAN_ENABLE_START BIT(0) +#define DMA_CHAN_ENABLE_STOP 0 + +#define DMA_CHAN_PAUSE 0x04 +#define DMA_CHAN_PAUSE_PAUSE BIT(1) +#define DMA_CHAN_PAUSE_RESUME 0 + +#define DMA_CHAN_LLI_ADDR 0x08 + +#define DMA_CHAN_CUR_CFG 0x0c +#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & 0x1f) +#define DMA_CHAN_CFG_SRC_IO_MODE BIT(5) +#define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5) +#define DMA_CHAN_CFG_SRC_BURST(x) (((x) & 0x3) << 7) +#define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9) + +#define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16) +#define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16) +#define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16) +#define DMA_CHAN_CFG_DST_BURST(x) (DMA_CHAN_CFG_SRC_BURST(x) << 16) +#define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16) + +#define DMA_CHAN_CUR_SRC 0x10 + +#define DMA_CHAN_CUR_DST 0x14 + +#define DMA_CHAN_CUR_CNT 0x18 + +#define DMA_CHAN_CUR_PARA 0x1c + + +/* + * Various hardware related defines + */ +#define LLI_LAST_ITEM 0xfffff800 +#define NORMAL_WAIT 8 +#define DRQ_SDRAM 1 + +/* + * Hardware representation of the LLI + * + * The hardware will be fed the physical address of this structure, + * and read its content in order to start the transfer. + */ +struct sun6i_dma_lli { + u32 cfg; + u32 src; + u32 dst; + u32 len; + u32 para; + u32 p_lli_next; + + /* + * This field is not used by the DMA controller, but will be + * used by the CPU to go through the list (mostly for dumping + * or freeing it). 
+ */ + struct sun6i_dma_lli *v_lli_next; +}; + + +struct sun6i_desc { + struct virt_dma_desc vd; + dma_addr_t p_lli; + struct sun6i_dma_lli *v_lli; +}; + +struct sun6i_pchan { + u32 idx; + void __iomem *base; + struct sun6i_vchan *vchan; + struct sun6i_desc *desc; + struct sun6i_desc *done; +}; + +struct sun6i_vchan { + struct virt_dma_chan vc; + struct list_head node; + struct dma_slave_config cfg; + struct sun6i_pchan *phy; + u8 port; +}; + +struct sun6i_dma_dev { + struct dma_device slave; + void __iomem *base; + struct clk *clk; + int irq; + spinlock_t lock; + struct reset_control *rstc; + struct tasklet_struct task; + atomic_t tasklet_shutdown; + struct list_head pending; + struct dma_pool *pool; + struct sun6i_pchan *pchans; + struct sun6i_vchan *vchans; +}; + +static struct device *chan2dev(struct dma_chan *chan) +{ + return &chan->dev->device; +} + +static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d) +{ + return container_of(d, struct sun6i_dma_dev, slave); +} + +static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan) +{ + return container_of(chan, struct sun6i_vchan, vc.chan); +} + +static inline struct sun6i_desc * +to_sun6i_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct sun6i_desc, vd.tx); +} + +static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev) +{ + dev_dbg(sdev->slave.dev, "Common register:\n" + "\tmask0(%04x): 0x%08x\n" + "\tmask1(%04x): 0x%08x\n" + "\tpend0(%04x): 0x%08x\n" + "\tpend1(%04x): 0x%08x\n" + "\tstats(%04x): 0x%08x\n", + DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)), + DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)), + DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)), + DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)), + DMA_STAT, readl(sdev->base + DMA_STAT)); +} + +static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, + struct sun6i_pchan *pchan) +{ + phys_addr_t reg = __virt_to_phys((unsigned long)pchan->base); + + dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n" + "\t___en(%04x): \t0x%08x\n" + "\tpause(%04x): \t0x%08x\n" + "\tstart(%04x): \t0x%08x\n" + "\t__cfg(%04x): \t0x%08x\n" + "\t__src(%04x): \t0x%08x\n" + "\t__dst(%04x): \t0x%08x\n" + "\tcount(%04x): \t0x%08x\n" + "\t_para(%04x): \t0x%08x\n\n", + pchan->idx, ®, + DMA_CHAN_ENABLE, + readl(pchan->base + DMA_CHAN_ENABLE), + DMA_CHAN_PAUSE, + readl(pchan->base + DMA_CHAN_PAUSE), + DMA_CHAN_LLI_ADDR, + readl(pchan->base + DMA_CHAN_LLI_ADDR), + DMA_CHAN_CUR_CFG, + readl(pchan->base + DMA_CHAN_CUR_CFG), + DMA_CHAN_CUR_SRC, + readl(pchan->base + DMA_CHAN_CUR_SRC), + DMA_CHAN_CUR_DST, + readl(pchan->base + DMA_CHAN_CUR_DST), + DMA_CHAN_CUR_CNT, + readl(pchan->base + DMA_CHAN_CUR_CNT), + DMA_CHAN_CUR_PARA, + readl(pchan->base + DMA_CHAN_CUR_PARA)); +} + +static inline int convert_burst(u32 maxburst, u8 *burst) +{ + switch (maxburst) { + case 1: + *burst = 0; + break; + case 8: + *burst = 2; + break; + default: + return -EINVAL; + } + + return 0; +} + +static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width) +{ + switch (addr_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + *width = 0; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + *width = 1; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + *width = 2; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, + struct sun6i_dma_lli *next, + dma_addr_t next_phy, + struct sun6i_desc *txd) +{ + if ((!prev && !txd) || !next) + return NULL; + + if (!prev) { + txd->p_lli = 
next_phy; + txd->v_lli = next; + } else { + prev->p_lli_next = next_phy; + prev->v_lli_next = next; + } + + next->p_lli_next = LLI_LAST_ITEM; + next->v_lli_next = NULL; + + return next; +} + +static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli, + dma_addr_t src, + dma_addr_t dst, u32 len, + struct dma_slave_config *config) +{ + u8 src_width, dst_width, src_burst, dst_burst; + int ret; + + if (!config) + return -EINVAL; + + ret = convert_burst(config->src_maxburst, &src_burst); + if (ret) + return ret; + + ret = convert_burst(config->dst_maxburst, &dst_burst); + if (ret) + return ret; + + ret = convert_buswidth(config->src_addr_width, &src_width); + if (ret) + return ret; + + ret = convert_buswidth(config->dst_addr_width, &dst_width); + if (ret) + return ret; + + lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) | + DMA_CHAN_CFG_SRC_WIDTH(src_width) | + DMA_CHAN_CFG_DST_BURST(dst_burst) | + DMA_CHAN_CFG_DST_WIDTH(dst_width); + + lli->src = src; + lli->dst = dst; + lli->len = len; + lli->para = NORMAL_WAIT; + + return 0; +} + +static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan, + struct sun6i_dma_lli *lli) +{ + phys_addr_t p_lli = __virt_to_phys((unsigned long)lli); + + dev_dbg(chan2dev(&vchan->vc.chan), + "\n\tdesc: p - %pa v - 0x%p\n" + "\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n" + "\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n", + &p_lli, lli, + lli->cfg, lli->src, lli->dst, + lli->len, lli->para, lli->p_lli_next); +} + +static void sun6i_dma_free_desc(struct virt_dma_desc *vd) +{ + struct sun6i_desc *txd = to_sun6i_desc(&vd->tx); + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device); + struct sun6i_dma_lli *v_lli, *v_next; + dma_addr_t p_lli, p_next; + + if (unlikely(!txd)) + return; + + p_lli = txd->p_lli; + v_lli = txd->v_lli; + + while (v_lli) { + v_next = v_lli->v_lli_next; + p_next = v_lli->p_lli_next; + + dma_pool_free(sdev->pool, v_lli, p_lli); + + v_lli = v_next; + p_lli = p_next; + } + + kfree(txd); +} + +static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); + struct sun6i_pchan *pchan = vchan->phy; + unsigned long flags; + LIST_HEAD(head); + + spin_lock(&sdev->lock); + list_del_init(&vchan->node); + spin_unlock(&sdev->lock); + + spin_lock_irqsave(&vchan->vc.lock, flags); + + vchan_get_all_descriptors(&vchan->vc, &head); + + if (pchan) { + writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE); + writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE); + + vchan->phy = NULL; + pchan->vchan = NULL; + pchan->desc = NULL; + pchan->done = NULL; + } + + spin_unlock_irqrestore(&vchan->vc.lock, flags); + + vchan_dma_desc_free_list(&vchan->vc, &head); + + return 0; +} + +static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); + struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc); + struct sun6i_pchan *pchan = vchan->phy; + u32 irq_val, irq_reg, irq_offset; + + if (!pchan) + return -EAGAIN; + + if (!desc) { + pchan->desc = NULL; + pchan->done = NULL; + return -EAGAIN; + } + + list_del(&desc->node); + + pchan->desc = to_sun6i_desc(&desc->tx); + pchan->done = NULL; + + sun6i_dma_dump_lli(vchan, pchan->desc->v_lli); + + irq_reg = pchan->idx / DMA_IRQ_CHAN_NR; + irq_offset = pchan->idx % DMA_IRQ_CHAN_NR; + + irq_val = readl(sdev->base + DMA_IRQ_EN(irq_offset)); + irq_val |= DMA_IRQ_QUEUE << (irq_offset * DMA_IRQ_CHAN_WIDTH); + writel(irq_val, sdev->base + DMA_IRQ_EN(irq_offset)); + + 
writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR); + writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE); + + sun6i_dma_dump_com_regs(sdev); + sun6i_dma_dump_chan_regs(sdev, pchan); + + return 0; +} + +static void sun6i_dma_tasklet(unsigned long data) +{ + struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data; + struct sun6i_vchan *vchan; + struct sun6i_pchan *pchan; + unsigned int pchan_alloc = 0; + unsigned int pchan_idx; + + list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) { + spin_lock_irq(&vchan->vc.lock); + + pchan = vchan->phy; + + if (pchan && pchan->done) { + if (sun6i_dma_start_desc(vchan)) { + /* + * No current txd associated with this channel + */ + dev_dbg(sdev->slave.dev, "pchan %u: free\n", + pchan->idx); + + /* Mark this channel free */ + vchan->phy = NULL; + pchan->vchan = NULL; + } + } + spin_unlock_irq(&vchan->vc.lock); + } + + spin_lock_irq(&sdev->lock); + for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) { + pchan = &sdev->pchans[pchan_idx]; + + if (pchan->vchan || list_empty(&sdev->pending)) + continue; + + vchan = list_first_entry(&sdev->pending, + struct sun6i_vchan, node); + + /* Remove from pending channels */ + list_del_init(&vchan->node); + pchan_alloc |= BIT(pchan_idx); + + /* Mark this channel allocated */ + pchan->vchan = vchan; + vchan->phy = pchan; + dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n", + pchan->idx, &vchan->vc); + } + spin_unlock_irq(&sdev->lock); + + for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) { + if (!(pchan_alloc & BIT(pchan_idx))) + continue; + + pchan = sdev->pchans + pchan_idx; + vchan = pchan->vchan; + if (vchan) { + spin_lock_irq(&vchan->vc.lock); + sun6i_dma_start_desc(vchan); + spin_unlock_irq(&vchan->vc.lock); + } + } +} + +static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id) +{ + struct sun6i_dma_dev *sdev = dev_id; + struct sun6i_vchan *vchan; + struct sun6i_pchan *pchan; + int i, j, ret = IRQ_NONE; + u32 status; + + for (i = 0; i < 2; i++) { + status = readl(sdev->base + DMA_IRQ_STAT(i)); + if (!status) + continue; + + dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n", + i ? "high" : "low", status); + + writel(status, sdev->base + DMA_IRQ_STAT(i)); + + for (j = 0; (j < 8) && status; j++) { + if (status & DMA_IRQ_QUEUE) { + pchan = sdev->pchans + j; + vchan = pchan->vchan; + + if (vchan) { + spin_lock(&vchan->vc.lock); + vchan_cookie_complete(&pchan->desc->vd); + pchan->done = pchan->desc; + spin_unlock(&vchan->vc.lock); + } + } + + status = status >> 4; + } + + if (!atomic_read(&sdev->tasklet_shutdown)) + tasklet_schedule(&sdev->task); + ret = IRQ_HANDLED; + } + + return ret; +} + +static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( + struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + struct dma_slave_config *sconfig = &vchan->cfg; + struct sun6i_dma_lli *v_lli; + struct sun6i_desc *txd; + dma_addr_t p_lli; + int ret; + + dev_dbg(chan2dev(chan), + "%s; chan: %d, dest: %pad, src: %pad, len: %zu. 
flags: 0x%08lx\n", + __func__, vchan->vc.chan.chan_id, &dest, &src, len, flags); + + if (!len) + return NULL; + + txd = kzalloc(sizeof(*txd), GFP_NOWAIT); + if (!txd) + return NULL; + + v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); + if (!v_lli) { + dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); + kfree(txd); + return NULL; + } + + ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig); + if (ret) + goto err_dma_free; + + v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | + DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | + DMA_CHAN_CFG_DST_LINEAR_MODE | + DMA_CHAN_CFG_SRC_LINEAR_MODE; + + sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); + + sun6i_dma_dump_lli(vchan, v_lli); + + return vchan_tx_prep(&vchan->vc, &txd->vd, flags); + +err_dma_free: + dma_pool_free(sdev->pool, v_lli, p_lli); + return NULL; +} + +static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long flags, void *context) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + struct dma_slave_config *sconfig = &vchan->cfg; + struct sun6i_dma_lli *v_lli, *prev = NULL; + struct sun6i_desc *txd; + struct scatterlist *sg; + dma_addr_t p_lli; + int i, ret; + + if (!sgl) + return NULL; + + if (!is_slave_direction(dir)) { + dev_err(chan2dev(chan), "Invalid DMA direction\n"); + return NULL; + } + + txd = kzalloc(sizeof(*txd), GFP_NOWAIT); + if (!txd) + return NULL; + + for_each_sg(sgl, sg, sg_len, i) { + v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); + if (!v_lli) { + kfree(txd); + return NULL; + } + + if (dir == DMA_MEM_TO_DEV) { + ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg), + sconfig->dst_addr, sg_dma_len(sg), + sconfig); + if (ret) + goto err_dma_free; + + v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE | + DMA_CHAN_CFG_SRC_LINEAR_MODE | + DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | + DMA_CHAN_CFG_DST_DRQ(vchan->port); + + dev_dbg(chan2dev(chan), + "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n", + __func__, vchan->vc.chan.chan_id, + &sconfig->dst_addr, &sg_dma_address(sg), + sg_dma_len(sg), flags); + + } else { + ret = sun6i_dma_cfg_lli(v_lli, sconfig->src_addr, + sg_dma_address(sg), sg_dma_len(sg), + sconfig); + if (ret) + goto err_dma_free; + + v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE | + DMA_CHAN_CFG_SRC_IO_MODE | + DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | + DMA_CHAN_CFG_SRC_DRQ(vchan->port); + + dev_dbg(chan2dev(chan), + "%s; chan: %d, dest: %pad, src: %pad, len: %zu. 
flags: 0x%08lx\n", + __func__, vchan->vc.chan.chan_id, + &sg_dma_address(sg), &sconfig->src_addr, + sg_dma_len(sg), flags); + } + + prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd); + } + + dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli); + for (prev = txd->v_lli; prev; prev = prev->v_lli_next) + sun6i_dma_dump_lli(vchan, prev); + + return vchan_tx_prep(&vchan->vc, &txd->vd, flags); + +err_dma_free: + dma_pool_free(sdev->pool, v_lli, p_lli); + return NULL; +} + +static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + struct sun6i_pchan *pchan = vchan->phy; + unsigned long flags; + int ret = 0; + + switch (cmd) { + case DMA_RESUME: + dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); + + spin_lock_irqsave(&vchan->vc.lock, flags); + + if (pchan) { + writel(DMA_CHAN_PAUSE_RESUME, + pchan->base + DMA_CHAN_PAUSE); + } else if (!list_empty(&vchan->vc.desc_issued)) { + spin_lock(&sdev->lock); + list_add_tail(&vchan->node, &sdev->pending); + spin_unlock(&sdev->lock); + } + + spin_unlock_irqrestore(&vchan->vc.lock, flags); + break; + + case DMA_PAUSE: + dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc); + + if (pchan) { + writel(DMA_CHAN_PAUSE_PAUSE, + pchan->base + DMA_CHAN_PAUSE); + } else { + spin_lock(&sdev->lock); + list_del_init(&vchan->node); + spin_unlock(&sdev->lock); + } + break; + + case DMA_TERMINATE_ALL: + ret = sun6i_dma_terminate_all(vchan); + break; + case DMA_SLAVE_CONFIG: + memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config)); + break; + default: + ret = -ENXIO; + break; + } + return ret; +} + +static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *state) +{ + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + struct sun6i_pchan *pchan = vchan->phy; + struct sun6i_dma_lli *lli; + struct virt_dma_desc *vd; + struct sun6i_desc *txd; + enum dma_status ret; + unsigned long flags; + size_t bytes = 0; + + ret = dma_cookie_status(chan, cookie, state); + if (ret == DMA_COMPLETE) + return ret; + + spin_lock_irqsave(&vchan->vc.lock, flags); + + vd = vchan_find_desc(&vchan->vc, cookie); + txd = to_sun6i_desc(&vd->tx); + + if (vd) { + for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next) + bytes += lli->len; + } else if (!pchan || !pchan->desc) { + bytes = 0; + } else { + bytes = readl(pchan->base + DMA_CHAN_CUR_CNT); + } + + spin_unlock_irqrestore(&vchan->vc.lock, flags); + + dma_set_residue(state, bytes); + + return ret; +} + +static void sun6i_dma_issue_pending(struct dma_chan *chan) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + unsigned long flags; + + spin_lock_irqsave(&vchan->vc.lock, flags); + + if (vchan_issue_pending(&vchan->vc)) { + spin_lock(&sdev->lock); + + if (!vchan->phy && list_empty(&vchan->node)) { + list_add_tail(&vchan->node, &sdev->pending); + tasklet_schedule(&sdev->task); + dev_dbg(chan2dev(chan), "vchan %p: issued\n", + &vchan->vc); + } + + spin_unlock(&sdev->lock); + } else { + dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n", + &vchan->vc); + } + + spin_unlock_irqrestore(&vchan->vc.lock, flags); +} + +static int sun6i_dma_alloc_chan_resources(struct dma_chan *chan) +{ + return 0; +} + +static void sun6i_dma_free_chan_resources(struct dma_chan *chan) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); + struct sun6i_vchan 
*vchan = to_sun6i_vchan(chan); + unsigned long flags; + + spin_lock_irqsave(&sdev->lock, flags); + list_del_init(&vchan->node); + spin_unlock_irqrestore(&sdev->lock, flags); + + vchan_free_chan_resources(&vchan->vc); +} + +static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct sun6i_dma_dev *sdev = ofdma->of_dma_data; + struct sun6i_vchan *vchan; + struct dma_chan *chan; + u8 port = dma_spec->args[0]; + + if (port > NR_MAX_REQUESTS) + return NULL; + + chan = dma_get_any_slave_channel(&sdev->slave); + if (!chan) + return NULL; + + vchan = to_sun6i_vchan(chan); + vchan->port = port; + + return chan; +} + +static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev) +{ + /* Disable all interrupts from DMA */ + writel(0, sdev->base + DMA_IRQ_EN(0)); + writel(0, sdev->base + DMA_IRQ_EN(1)); + + /* Prevent spurious interrupts from scheduling the tasklet */ + atomic_inc(&sdev->tasklet_shutdown); + + /* Make sure all interrupts are handled */ + synchronize_irq(sdev->irq); + + /* Actually prevent the tasklet from being scheduled */ + tasklet_kill(&sdev->task); +} + +static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev) +{ + int i; + + for (i = 0; i < NR_MAX_VCHANS; i++) { + struct sun6i_vchan *vchan = &sdev->vchans[i]; + + list_del(&vchan->vc.chan.device_node); + tasklet_kill(&vchan->vc.task); + } +} + +static int sun6i_dma_probe(struct platform_device *pdev) +{ + struct sun6i_dma_dev *sdc; + struct resource *res; + struct clk *mux, *pll6; + int ret, i; + + sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL); + if (!sdc) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sdc->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(sdc->base)) + return PTR_ERR(sdc->base); + + sdc->irq = platform_get_irq(pdev, 0); + if (sdc->irq < 0) { + dev_err(&pdev->dev, "Cannot claim IRQ\n"); + return sdc->irq; + } + + sdc->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(sdc->clk)) { + dev_err(&pdev->dev, "No clock specified\n"); + return PTR_ERR(sdc->clk); + } + + mux = clk_get(NULL, "ahb1_mux"); + if (IS_ERR(mux)) { + dev_err(&pdev->dev, "Couldn't get AHB1 Mux\n"); + return PTR_ERR(mux); + } + + pll6 = clk_get(NULL, "pll6"); + if (IS_ERR(pll6)) { + dev_err(&pdev->dev, "Couldn't get PLL6\n"); + clk_put(mux); + return PTR_ERR(pll6); + } + + ret = clk_set_parent(mux, pll6); + clk_put(pll6); + clk_put(mux); + + if (ret) { + dev_err(&pdev->dev, "Couldn't reparent AHB1 on PLL6\n"); + return ret; + } + + sdc->rstc = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(sdc->rstc)) { + dev_err(&pdev->dev, "No reset controller specified\n"); + return PTR_ERR(sdc->rstc); + } + + sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, + sizeof(struct sun6i_dma_lli), 4, 0); + if (!sdc->pool) { + dev_err(&pdev->dev, "No memory for descriptors dma pool\n"); + return -ENOMEM; + } + + platform_set_drvdata(pdev, sdc); + INIT_LIST_HEAD(&sdc->pending); + spin_lock_init(&sdc->lock); + + dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask); + dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask); + dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask); + + INIT_LIST_HEAD(&sdc->slave.channels); + sdc->slave.device_alloc_chan_resources = sun6i_dma_alloc_chan_resources; + sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources; + sdc->slave.device_tx_status = sun6i_dma_tx_status; + sdc->slave.device_issue_pending = sun6i_dma_issue_pending; + sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; + 
sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; + sdc->slave.device_control = sun6i_dma_control; + sdc->slave.chancnt = NR_MAX_VCHANS; + + sdc->slave.dev = &pdev->dev; + + sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS, + sizeof(struct sun6i_pchan), GFP_KERNEL); + if (!sdc->pchans) + return -ENOMEM; + + sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS, + sizeof(struct sun6i_vchan), GFP_KERNEL); + if (!sdc->vchans) + return -ENOMEM; + + tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc); + + for (i = 0; i < NR_MAX_CHANNELS; i++) { + struct sun6i_pchan *pchan = &sdc->pchans[i]; + + pchan->idx = i; + pchan->base = sdc->base + 0x100 + i * 0x40; + } + + for (i = 0; i < NR_MAX_VCHANS; i++) { + struct sun6i_vchan *vchan = &sdc->vchans[i]; + + INIT_LIST_HEAD(&vchan->node); + vchan->vc.desc_free = sun6i_dma_free_desc; + vchan_init(&vchan->vc, &sdc->slave); + } + + ret = reset_control_deassert(sdc->rstc); + if (ret) { + dev_err(&pdev->dev, "Couldn't deassert the device from reset\n"); + goto err_chan_free; + } + + ret = clk_prepare_enable(sdc->clk); + if (ret) { + dev_err(&pdev->dev, "Couldn't enable the clock\n"); + goto err_reset_assert; + } + + ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0, + dev_name(&pdev->dev), sdc); + if (ret) { + dev_err(&pdev->dev, "Cannot request IRQ\n"); + goto err_clk_disable; + } + + ret = dma_async_device_register(&sdc->slave); + if (ret) { + dev_warn(&pdev->dev, "Failed to register DMA engine device\n"); + goto err_irq_disable; + } + + ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate, + sdc); + if (ret) { + dev_err(&pdev->dev, "of_dma_controller_register failed\n"); + goto err_dma_unregister; + } + + return 0; + +err_dma_unregister: + dma_async_device_unregister(&sdc->slave); +err_irq_disable: + sun6i_kill_tasklet(sdc); +err_clk_disable: + clk_disable_unprepare(sdc->clk); +err_reset_assert: + reset_control_assert(sdc->rstc); +err_chan_free: + sun6i_dma_free(sdc); + return ret; +} + +static int sun6i_dma_remove(struct platform_device *pdev) +{ + struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&sdc->slave); + + sun6i_kill_tasklet(sdc); + + clk_disable_unprepare(sdc->clk); + reset_control_assert(sdc->rstc); + + sun6i_dma_free(sdc); + + return 0; +} + +static struct of_device_id sun6i_dma_match[] = { + { .compatible = "allwinner,sun6i-a31-dma" }, + { /* sentinel */ } +}; + +static struct platform_driver sun6i_dma_driver = { + .probe = sun6i_dma_probe, + .remove = sun6i_dma_remove, + .driver = { + .name = "sun6i-dma", + .of_match_table = sun6i_dma_match, + }, +}; +module_platform_driver(sun6i_dma_driver); + +MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver"); +MODULE_AUTHOR("Sugar "); +MODULE_AUTHOR("Maxime Ripard "); +MODULE_LICENSE("GPL"); -- cgit v1.2.3-58-ga151 From 16369efb1f6006ec79babe53f388eed431533596 Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Wed, 25 Jun 2014 14:52:59 +0400 Subject: dmaengine: of: add common xlate function for matching by channel id This patch adds a new common OF dma xlate callback function which will match a channel by its id. The binding expects one integer argument which it will use to look up the channel by id. Unlike of_dma_simple_xlate this function is able to handle a system with multiple DMA controllers. 
When registering the of dma provider with of_dma_controller_register a pointer to the dma_device struct which is associated with the dt node needs to be passed as the data parameter. The new function will use this pointer to match only channels which belong to the specified DMA controller. Signed-off-by: Alexander Popov Signed-off-by: Vinod Koul --- drivers/dma/of-dma.c | 35 +++++++++++++++++++++++++++++++++++ include/linux/of_dma.h | 4 ++++ 2 files changed, 39 insertions(+) (limited to 'drivers') diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index e8fe9dc455f4..d5fbeaa1e7ba 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -218,3 +218,38 @@ struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, &dma_spec->args[0]); } EXPORT_SYMBOL_GPL(of_dma_simple_xlate); + +/** + * of_dma_xlate_by_chan_id - Translate dt property to DMA channel by channel id + * @dma_spec: pointer to DMA specifier as found in the device tree + * @of_dma: pointer to DMA controller data + * + * This function can be used as the of xlate callback for a DMA driver which wants + * to match the channel based on the channel id. When using this xlate function + * the #dma-cells property of the DMA controller dt node needs to be set to 1. + * The data parameter of of_dma_controller_register must be a pointer to the + * dma_device struct the function should match upon. + * + * Returns pointer to appropriate dma channel on success or NULL on error. + */ +struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct dma_device *dev = ofdma->of_dma_data; + struct dma_chan *chan, *candidate = NULL; + + if (!dev || dma_spec->args_count != 1) + return NULL; + + list_for_each_entry(chan, &dev->channels, device_node) + if (chan->chan_id == dma_spec->args[0]) { + candidate = chan; + break; + } + + if (!candidate) + return NULL; + + return dma_get_slave_channel(candidate); +} +EXPORT_SYMBOL_GPL(of_dma_xlate_by_chan_id); diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index ae36298ba076..56bc026c143f 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h @@ -41,6 +41,8 @@ extern struct dma_chan *of_dma_request_slave_channel(struct device_node *np, const char *name); extern struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma); +extern struct dma_chan *of_dma_xlate_by_chan_id(struct of_phandle_args *dma_spec, + struct of_dma *ofdma); #else static inline int of_dma_controller_register(struct device_node *np, struct dma_chan *(*of_dma_xlate) @@ -66,6 +68,8 @@ static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_s return NULL; } +#define of_dma_xlate_by_chan_id NULL + #endif #endif /* __LINUX_OF_DMA_H */ -- cgit v1.2.3-58-ga151 From ec1f0c96669a23f898c25a30a5319baebf3f0677 Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Wed, 25 Jun 2014 14:53:00 +0400 Subject: dmaengine: mpc512x: register for device tree channel lookup Register the controller for device tree based lookup of DMA channels (non-fatal for backwards compatibility with older device trees) and provide the '#dma-cells' property in the shared mpc5121.dtsi file. Signed-off-by: Alexander Popov Signed-off-by: Vinod Koul --- arch/powerpc/boot/dts/mpc5121.dtsi | 1 + drivers/dma/mpc512x_dma.c | 13 ++++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi index 2c0e1552d20b..7f9d14f5c4da 100644 --- 
a/arch/powerpc/boot/dts/mpc5121.dtsi +++ b/arch/powerpc/boot/dts/mpc5121.dtsi @@ -498,6 +498,7 @@ compatible = "fsl,mpc5121-dma"; reg = <0x14000 0x1800>; interrupts = <65 0x8>; + #dma-cells = <1>; }; }; diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 2ad43738ac8b..881db2bcb48b 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -1036,7 +1037,15 @@ static int mpc_dma_probe(struct platform_device *op) if (retval) goto err_free2; - return retval; + /* Register with OF helpers for DMA lookups (nonfatal) */ + if (dev->of_node) { + retval = of_dma_controller_register(dev->of_node, + of_dma_xlate_by_chan_id, mdma); + if (retval) + dev_warn(dev, "Could not register for OF lookup\n"); + } + + return 0; err_free2: if (mdma->is_mpc8308) @@ -1057,6 +1066,8 @@ static int mpc_dma_remove(struct platform_device *op) struct device *dev = &op->dev; struct mpc_dma *mdma = dev_get_drvdata(dev); + if (dev->of_node) + of_dma_controller_free(dev->of_node); dma_async_device_unregister(&mdma->dma); if (mdma->is_mpc8308) { free_irq(mdma->irq2, mdma); -- cgit v1.2.3-58-ga151 From 42c0d54e62369584f2491f072f405602ff504e0e Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 28 Jul 2014 11:57:25 +0530 Subject: dmaengine: sun6i: fix build failure on x86, xilinx targets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since the driver defined COMPILE_TEST, it gets compiled for different arches. The driver uses __virt_to_phys() instead of virt_to_phys(), so replace it: drivers/dma/sun6i-dma.c: In function ‘sun6i_dma_dump_chan_regs’: drivers/dma/sun6i-dma.c:203: error: implicit declaration of function '__virt_to_phys' Reported-by: kbuild test robot Reported-by: Stephen Rothwell Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index ce8d5d1b0ff4..e9f99bbc985e 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -200,7 +200,7 @@ static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev) static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev, struct sun6i_pchan *pchan) { - phys_addr_t reg = __virt_to_phys((unsigned long)pchan->base); + phys_addr_t reg = virt_to_phys(pchan->base); dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n" "\t___en(%04x): \t0x%08x\n" @@ -330,7 +330,7 @@ static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli, static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan, struct sun6i_dma_lli *lli) { - phys_addr_t p_lli = __virt_to_phys((unsigned long)lli); + phys_addr_t p_lli = virt_to_phys(lli); dev_dbg(chan2dev(&vchan->vc.chan), "\n\tdesc: p - %pa v - 0x%p\n" -- cgit v1.2.3-58-ga151 From 7f5e03e73672932ba5fdb078e79914ca93a9e1ee Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 28 Jul 2014 12:32:51 +0530 Subject: dmaengine: sun6i: fix warning on bad print specifier MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sg_dma_len() returns unsigned int, but the driver printed it with %zu; use %u as documented in Documentation/printk-formats.txt: drivers/dma/sun6i-dma.c: In function ‘sun6i_dma_prep_slave_sg’: drivers/dma/sun6i-dma.c:643: warning: format ‘%zu’ expects type ‘size_t’, but argument 8 has type ‘unsigned int’ drivers/dma/sun6i-dma.c:661: warning: format ‘%zu’ expects type ‘size_t’, but argument 8 has type 
Reported-by: kbuild test robot Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index e9f99bbc985e..c771d90b8ded 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -641,7 +641,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( DMA_CHAN_CFG_DST_DRQ(vchan->port); dev_dbg(chan2dev(chan), - "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n", + "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", __func__, vchan->vc.chan.chan_id, &sconfig->dst_addr, &sg_dma_address(sg), sg_dma_len(sg), flags); @@ -659,7 +659,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( DMA_CHAN_CFG_SRC_DRQ(vchan->port); dev_dbg(chan2dev(chan), - "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n", + "%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n", __func__, vchan->vc.chan.chan_id, &sg_dma_address(sg), &sconfig->src_addr, sg_dma_len(sg), flags); -- cgit v1.2.3-58-ga151 From b7f9bc5267d3517de8a7159219db96de7d12a37b Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 16 Jul 2014 15:29:20 +0300 Subject: dmaengine: edma: Update caps->residue_granularity to match reality The edma can report an accurate DMA position, so update residue_granularity to DMA_RESIDUE_GRANULARITY_BURST. Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/edma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index d08c4dedef35..8e400c0f3529 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -992,7 +992,7 @@ static int edma_dma_device_slave_caps(struct dma_chan *dchan, caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); caps->cmd_pause = true; caps->cmd_terminate = true; - caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; return 0; } -- cgit v1.2.3-58-ga151 From a1f146f31724b649f74185ffc817bbd983eba5e2 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 16 Jul 2014 15:29:21 +0300 Subject: dmaengine: edma: Support suppressing the period interrupts in cyclic mode If the client (audio) does not request interrupts for every period, we can disable them. With an updated audio driver stack we can play audio without having to process any edma interrupts. Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/edma.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 8e400c0f3529..54f7408e14ef 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -718,10 +718,10 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( edesc->absync = ret; /* - * Enable interrupts for every period because callback - * has to be called for every period.
+ * Enable period interrupt only if it is requested */ - edesc->pset[i].param.opt |= TCINTEN; + if (tx_flags & DMA_PREP_INTERRUPT) + edesc->pset[i].param.opt |= TCINTEN; } return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); -- cgit v1.2.3-58-ga151 From 8e8805d5bf704f24acb14d67d506cbd4c5893be5 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 8 Jul 2014 13:46:38 +0300 Subject: dmaengine: edma: Serve cyclic (audio) channels with high priority queue Move the DMA channel used in cyclic mode (audio) to the highest priority event queue, which helps to reduce audio problems. When the channel is terminated, move it back to the default queue. Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/edma.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers') diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 54f7408e14ef..3754ffa09f27 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -256,8 +256,13 @@ static int edma_terminate_all(struct edma_chan *echan) * echan->edesc is NULL and exit.) */ if (echan->edesc) { + int cyclic = echan->edesc->cyclic; echan->edesc = NULL; edma_stop(echan->ch_num); + /* Move the cyclic channel back to default queue */ + if (cyclic) + edma_assign_channel_eventq(echan->ch_num, + EVENTQ_DEFAULT); } vchan_get_all_descriptors(&echan->vchan, &head); @@ -724,6 +729,9 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( edesc->pset[i].param.opt |= TCINTEN; } + /* Place the cyclic channel to highest priority queue */ + edma_assign_channel_eventq(echan->ch_num, EVENTQ_0); + return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); } -- cgit v1.2.3-58-ga151 From 2795eedf2fbd816f1bba0e8eb5f33434a67d61c2 Mon Sep 17 00:00:00 2001 From: Alexander Popov Date: Wed, 30 Jul 2014 15:58:51 +0400 Subject: dmaengine: of: kconfig: select DMA_ENGINE when DMA_OF is selected Select DMA_ENGINE when DMA_OF is selected, since of_dma_xlate_by_chan_id() from drivers/dma/of-dma.c uses dma_get_slave_channel() from drivers/dma/dmaengine.c. This resolves the reported error: drivers/built-in.o: In function `of_dma_xlate_by_chan_id': drivers/dma/of-dma.c:253: undefined reference to `dma_get_slave_channel' Signed-off-by: Alexander Popov Reported-by: kbuild test robot Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 4b439270fb11..d4f0415894c7 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -396,6 +396,7 @@ config DMA_ACPI config DMA_OF def_bool y depends on OF + select DMA_ENGINE comment "DMA Clients" depends on DMA_ENGINE -- cgit v1.2.3-58-ga151 From 92e4a3bf3812b0b0d06c6736fe4d83e1442640ef Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 30 Jul 2014 10:30:21 +0200 Subject: dmaengine: sun6i: Remove switch statement from buswidth conversion routine Since the conversion routine is quite trivial, we don't need this switch, and we can just use a simple calculation.
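To see why a shift suffices: enum dma_slave_buswidth encodes each width as its byte count (1, 2 or 4 here), and the register field wants 0, 1 or 2, so width = bytes >> 1 maps 1 to 0, 2 to 1 and 4 to 2. A minimal standalone sketch of that mapping (plain C, mirroring the patched routine's logic rather than reproducing it):

	#include <stdio.h>

	int main(void)
	{
		unsigned int bytes;

		/* valid bus widths for this controller: 1, 2 and 4 bytes */
		for (bytes = 1; bytes <= 4; bytes <<= 1)
			printf("%u byte(s) -> width field %u\n",
			       bytes, bytes >> 1);	/* 1->0, 2->1, 4->2 */
		return 0;
	}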
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index c771d90b8ded..609c5d8cb947 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -248,20 +248,11 @@ static inline int convert_burst(u32 maxburst, u8 *burst) static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width) { - switch (addr_width) { - case DMA_SLAVE_BUSWIDTH_1_BYTE: - *width = 0; - break; - case DMA_SLAVE_BUSWIDTH_2_BYTES: - *width = 1; - break; - case DMA_SLAVE_BUSWIDTH_4_BYTES: - *width = 2; - break; - default: + if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) || + (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) return -EINVAL; - } + *width = addr_width >> 1; return 0; } -- cgit v1.2.3-58-ga151 From 174427c1bb9e7810743f55a90a64da2c29d67b0b Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 30 Jul 2014 10:30:22 +0200 Subject: dmaengine: sun6i: Free the interrupt before killing the tasklet There's still a small window between the call to sun6i_kill_tasklet and the end of the driver remove function where a spurious interrupt might trigger and start using deallocated resources. Replace the call to synchronize_irq with a free_irq, so that we're sure we won't get any further interrupts while we're deallocating resources. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 609c5d8cb947..63a1db38894e 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -836,8 +836,8 @@ static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev) /* Prevent spurious interrupts from scheduling the tasklet */ atomic_inc(&sdev->tasklet_shutdown); - /* Make sure all interrupts are handled */ - synchronize_irq(sdev->irq); + /* Make sure we won't have any further interrupts */ + devm_free_irq(sdev->slave.dev, sdev->irq, sdev); /* Actually prevent the tasklet from being scheduled */ tasklet_kill(&sdev->task); -- cgit v1.2.3-58-ga151 From 4fbd804e009ae9dff29dcb1fbfa0aaffd3992880 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 30 Jul 2014 10:30:23 +0200 Subject: dmaengine: sun6i: Fix memory leaks The sun6i_dma_prep_memcpy and sun6i_dma_prep_slave_sg functions were both leaking the descriptor they allocated if an error occurred after a successful dma_pool_alloc call. This patch also fixes a memory leak in the scatter-gather list traversal, which allocated as many descriptors as there were scatter-gather items but only freed the current descriptor when an error arose.
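The fix follows the usual kernel unwind idiom: error labels are ordered so that a failure at any step releases exactly what was set up before it, with the partially configured item freed first. A compressed, compilable sketch of the pattern (plain C, not the driver code; pool_alloc(), pool_free() and configure() are hypothetical stand-ins for dma_pool_alloc/dma_pool_free and the per-item config step):

	#include <stdlib.h>

	struct item { struct item *next; };
	struct txd  { struct item *head; };

	/* hypothetical stand-ins for the driver's pool and config helpers */
	static struct item *pool_alloc(void) { return malloc(sizeof(struct item)); }
	static void pool_free(struct item *it) { free(it); }
	static int configure(struct item *it) { (void)it; return 0; }

	struct txd *prep(int n)
	{
		struct txd *txd = calloc(1, sizeof(*txd));
		struct item *it;
		int i;

		if (!txd)
			return NULL;

		for (i = 0; i < n; i++) {
			it = pool_alloc();
			if (!it)
				goto err_items_free;	/* unwind the whole list */
			if (configure(it) < 0)
				goto err_cur_free;	/* this item first, then the rest */
			it->next = txd->head;		/* link into the descriptor */
			txd->head = it;
		}
		return txd;

	err_cur_free:
		pool_free(it);
	err_items_free:
		while (txd->head) {			/* walk the list built so far */
			it = txd->head;
			txd->head = it->next;
			pool_free(it);
		}
		free(txd);
		return NULL;
	}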
Reported-by: Dan Carpenter Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul --- drivers/dma/sun6i-dma.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 63a1db38894e..1f92a56fd2b6 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -562,8 +562,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); if (!v_lli) { dev_err(sdev->slave.dev, "Failed to alloc lli memory\n"); - kfree(txd); - return NULL; + goto err_txd_free; } ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig); @@ -583,6 +582,8 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( err_dma_free: dma_pool_free(sdev->pool, v_lli, p_lli); +err_txd_free: + kfree(txd); return NULL; } @@ -614,17 +615,15 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( for_each_sg(sgl, sg, sg_len, i) { v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli); - if (!v_lli) { - kfree(txd); - return NULL; - } + if (!v_lli) + goto err_lli_free; if (dir == DMA_MEM_TO_DEV) { ret = sun6i_dma_cfg_lli(v_lli, sg_dma_address(sg), sconfig->dst_addr, sg_dma_len(sg), sconfig); if (ret) - goto err_dma_free; + goto err_cur_lli_free; v_lli->cfg |= DMA_CHAN_CFG_DST_IO_MODE | DMA_CHAN_CFG_SRC_LINEAR_MODE | @@ -642,7 +641,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( sg_dma_address(sg), sg_dma_len(sg), sconfig); if (ret) - goto err_dma_free; + goto err_cur_lli_free; v_lli->cfg |= DMA_CHAN_CFG_DST_LINEAR_MODE | DMA_CHAN_CFG_SRC_IO_MODE | @@ -665,8 +664,12 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg( return vchan_tx_prep(&vchan->vc, &txd->vd, flags); -err_dma_free: +err_cur_lli_free: dma_pool_free(sdev->pool, v_lli, p_lli); +err_lli_free: + for (prev = txd->v_lli; prev; prev = prev->v_lli_next) + dma_pool_free(sdev->pool, prev, virt_to_phys(prev)); + kfree(txd); return NULL; } -- cgit v1.2.3-58-ga151 From 9f2c2bb31258e11b1ebaf73bdf2a88f8afaa2dd2 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 31 Jul 2014 09:34:04 +0900 Subject: dmaengine: sh: Rework Kconfig and Makefile Separate helpers and drivers in the Kconfig and Makefile to improve readability and move the CONFIG_OF dependency from the Makefile to Kconfig. [pebolle@tiscali.nl: reported need to rename SHDMA_R8A73A4 instances] Reported-by: Paul Bolle Signed-off-by: Laurent Pinchart [horms+renesas@verge.net.au: squashed rename of SHDMA_R8A73A4 instances] Signed-off-by: Simon Horman Signed-off-by: Vinod Koul --- drivers/dma/sh/Kconfig | 24 +++++++++++++++++++----- drivers/dma/sh/Makefile | 16 ++++++++++++---- drivers/dma/sh/shdma.h | 2 +- 3 files changed, 32 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 0f719816c91b..0349125a2e20 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -2,21 +2,39 @@ # DMA engine configuration for sh # +# +# DMA Engine Helpers +# + config SH_DMAE_BASE bool "Renesas SuperH DMA Engine support" - depends on (SUPERH && SH_DMA) || ARCH_SHMOBILE || COMPILE_TEST + depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST + depends on !SUPERH || SH_DMA depends on !SH_DMA_API default y select DMA_ENGINE help Enable support for the Renesas SuperH DMA controllers. 
+# +# DMA Controllers +# + config SH_DMAE tristate "Renesas SuperH DMAC support" depends on SH_DMAE_BASE help Enable support for the Renesas SuperH DMA controllers. +if SH_DMAE + +config SH_DMAE_R8A73A4 + def_bool y + depends on ARCH_R8A73A4 + depends on OF + +endif + config SUDMAC tristate "Renesas SUDMAC support" depends on SH_DMAE_BASE @@ -34,7 +52,3 @@ config RCAR_AUDMAC_PP depends on SH_DMAE_BASE help Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers. - -config SHDMA_R8A73A4 - def_bool y - depends on ARCH_R8A73A4 && SH_DMAE != n diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 1ce88b28cfc6..0a5cfdb76e45 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile @@ -1,10 +1,18 @@ +# +# DMA Engine Helpers +# + obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o shdma-of.o -obj-$(CONFIG_SH_DMAE) += shdma.o + +# +# DMA Controllers +# + shdma-y := shdmac.o -ifeq ($(CONFIG_OF),y) -shdma-$(CONFIG_SHDMA_R8A73A4) += shdma-r8a73a4.o -endif +shdma-$(CONFIG_SH_DMAE_R8A73A4) += shdma-r8a73a4.o shdma-objs := $(shdma-y) +obj-$(CONFIG_SH_DMAE) += shdma.o + obj-$(CONFIG_SUDMAC) += sudmac.o obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o diff --git a/drivers/dma/sh/shdma.h b/drivers/dma/sh/shdma.h index 758a57b51875..2c0a969adc9f 100644 --- a/drivers/dma/sh/shdma.h +++ b/drivers/dma/sh/shdma.h @@ -62,7 +62,7 @@ struct sh_dmae_desc { #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ struct sh_dmae_device, shdma_dev.dma_dev) -#ifdef CONFIG_SHDMA_R8A73A4 +#ifdef CONFIG_SH_DMAE_R8A73A4 extern const struct sh_dmae_pdata r8a73a4_dma_pdata; #define r8a73a4_shdma_devid (&r8a73a4_dma_pdata) #else -- cgit v1.2.3-58-ga151 From c091ff51b4d2543b828d53ce47f66905dee870fd Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 31 Jul 2014 09:34:05 +0900 Subject: dmaengine: shdma: Make channel filter ignore unrelated devices The shdma_chan_filter() function relies on the DMA channel being embedded in an shdma_chan structure. If this assumption isn't true, for instance when the system contains DMA channels supported by an unrelated driver, the function will crash. Avoid this by returning false directly when the channel belongs to an unrelated device. Signed-off-by: Laurent Pinchart Signed-off-by: Simon Horman Signed-off-by: Vinod Koul --- drivers/dma/sh/shdma-base.c | 84 ++++++++++++++++++++++++--------------------- 1 file changed, 45 insertions(+), 39 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index b35007e21e6b..94b6bde6c86a 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -206,45 +206,6 @@ static int shdma_setup_slave(struct shdma_chan *schan, int slave_id, return 0; } -/* - * This is the standard shdma filter function to be used as a replacement to the - * "old" method, using the .private pointer. If for some reason you allocate a - * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter - * parameter. If this filter is used, the slave driver, after calling - * dma_request_channel(), will also have to call dmaengine_slave_config() with - * .slave_id, .direction, and either .src_addr or .dst_addr set. - * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE - * capability! 
If this becomes a requirement, hardware glue drivers, using this - * services would have to provide their own filters, which first would check - * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do - * this, and only then, in case of a match, call this common filter. - * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate(). - * In that case the MID-RID value is used for slave channel filtering and is - * passed to this function in the "arg" parameter. - */ -bool shdma_chan_filter(struct dma_chan *chan, void *arg) -{ - struct shdma_chan *schan = to_shdma_chan(chan); - struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); - const struct shdma_ops *ops = sdev->ops; - int match = (long)arg; - int ret; - - if (match < 0) - /* No slave requested - arbitrary channel */ - return true; - - if (!schan->dev->of_node && match >= slave_num) - return false; - - ret = ops->set_slave(schan, match, 0, true); - if (ret < 0) - return false; - - return true; -} -EXPORT_SYMBOL(shdma_chan_filter); - static int shdma_alloc_chan_resources(struct dma_chan *chan) { struct shdma_chan *schan = to_shdma_chan(chan); @@ -295,6 +256,51 @@ esetslave: return ret; } +/* + * This is the standard shdma filter function to be used as a replacement to the + * "old" method, using the .private pointer. If for some reason you allocate a + * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter + * parameter. If this filter is used, the slave driver, after calling + * dma_request_channel(), will also have to call dmaengine_slave_config() with + * .slave_id, .direction, and either .src_addr or .dst_addr set. + * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE + * capability! If this becomes a requirement, hardware glue drivers, using this + * services would have to provide their own filters, which first would check + * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do + * this, and only then, in case of a match, call this common filter. + * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate(). + * In that case the MID-RID value is used for slave channel filtering and is + * passed to this function in the "arg" parameter. + */ +bool shdma_chan_filter(struct dma_chan *chan, void *arg) +{ + struct shdma_chan *schan; + struct shdma_dev *sdev; + int match = (long)arg; + int ret; + + /* Only support channels handled by this driver. */ + if (chan->device->device_alloc_chan_resources != + shdma_alloc_chan_resources) + return false; + + if (match < 0) + /* No slave requested - arbitrary channel */ + return true; + + schan = to_shdma_chan(chan); + if (!schan->dev->of_node && match >= slave_num) + return false; + + sdev = to_shdma_dev(schan->dma_chan.device); + ret = sdev->ops->set_slave(schan, match, 0, true); + if (ret < 0) + return false; + + return true; +} +EXPORT_SYMBOL(shdma_chan_filter); + static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) { struct shdma_desc *desc, *_desc; -- cgit v1.2.3-58-ga151 From 4415b03abb0aacd937010f13310b7fa437b9ad7d Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 31 Jul 2014 09:34:06 +0900 Subject: dmaengine: shdma: Allocate cyclic sg list dynamically MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The sg list used to prepare cyclic DMA descriptors is currently allocated statically on the stack as an array of 32 elements. 
This makes the shdma_prep_dma_cyclic() function consume a lot of stack space, as reported by the compiler: drivers/dma/sh/shdma-base.c: In function ‘shdma_prep_dma_cyclic’: drivers/dma/sh/shdma-base.c:715:1: warning: the frame size of 1056 bytes is larger than 1024 bytes [-Wframe-larger-than=] Given the limited Linux kernel stack size, this could lead to stack overflows. Fix the problem by allocating the sg list dynamically. Signed-off-by: Laurent Pinchart Signed-off-by: Simon Horman Signed-off-by: Vinod Koul --- drivers/dma/sh/shdma-base.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 94b6bde6c86a..e427a03a0e8b 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -672,11 +672,12 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( { struct shdma_chan *schan = to_shdma_chan(chan); struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); + struct dma_async_tx_descriptor *desc; const struct shdma_ops *ops = sdev->ops; unsigned int sg_len = buf_len / period_len; int slave_id = schan->slave_id; dma_addr_t slave_addr; - struct scatterlist sgl[SHDMA_MAX_SG_LEN]; + struct scatterlist *sgl; int i; if (!chan) @@ -700,7 +701,16 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( slave_addr = ops->slave_addr(schan); + /* + * Allocate the sg list dynamically as it would consume too much stack + * space. + */ + sgl = kcalloc(sg_len, sizeof(*sgl), GFP_KERNEL); + if (!sgl) + return NULL; + sg_init_table(sgl, sg_len); + for (i = 0; i < sg_len; i++) { dma_addr_t src = buf_addr + (period_len * i); @@ -710,8 +720,11 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( sg_dma_len(&sgl[i]) = period_len; } - return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, + desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr, direction, flags, true); + + kfree(sgl); + return desc; } static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, -- cgit v1.2.3-58-ga151 From 04d537d95e2f48295b6f61ef7029a2dba75e3677 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Thu, 31 Jul 2014 13:12:37 +0300 Subject: dmaengine: edma: Do not change the error code returned from edma_alloc_slot In case of edma_alloc_slot() failure during probe, we should return the error unchanged to make debugging easier. Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/edma.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 3754ffa09f27..4190976ababc 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -1047,7 +1047,7 @@ static int edma_probe(struct platform_device *pdev) ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); if (ecc->dummy_slot < 0) { dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); - return -EIO; + return ecc->dummy_slot; } dma_cap_zero(ecc->dma_slave.cap_mask); -- cgit v1.2.3-58-ga151 From ed64610f29414c0ea782bb6a462a3be72e7704dd Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Thu, 31 Jul 2014 13:12:38 +0300 Subject: dmaengine: edma: Do not register second device when booted with DT DT boot does not yet support more than one edma device. To avoid issues at runtime, we should not register the second device when the kernel is booted with DT.
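For context, the helper the diff below keys on: of_have_populated_dt(), from <linux/of.h>, reports whether the kernel was handed a non-empty device tree, so legacy board-file registration can be skipped on DT boots. Schematically (a restatement of the hunk below, not new driver logic):

	/* legacy (non-DT) boot: register the second controller by hand */
	if (!of_have_populated_dt() && EDMA_CTLRS == 2)
		pdev1 = platform_device_register_full(&edma_dev_info1);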
Signed-off-by: Peter Ujfalusi Signed-off-by: Vinod Koul --- drivers/dma/edma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 4190976ababc..a13f37f719ed 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -1132,7 +1133,7 @@ static int edma_init(void) } } - if (EDMA_CTLRS == 2) { + if (!of_have_populated_dt() && EDMA_CTLRS == 2) { pdev1 = platform_device_register_full(&edma_dev_info1); if (IS_ERR(pdev1)) { platform_driver_unregister(&edma_driver); -- cgit v1.2.3-58-ga151 From b45b262cefd5b8eb2ba88d20e5bd295881293894 Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Sat, 19 Jul 2014 12:48:51 +0200 Subject: dmaengine: add a driver for AMBA AXI NBPF DMAC IP cores This patch adds a driver for NBPF DMAC IP cores from Renesas, designed for the AMBA AXI bus. Signed-off-by: Guennadi Liakhovetski Signed-off-by: Vinod Koul --- drivers/dma/Kconfig | 6 + drivers/dma/Makefile | 1 + drivers/dma/nbpfaxi.c | 1511 +++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 1518 insertions(+) create mode 100644 drivers/dma/nbpfaxi.c (limited to 'drivers') diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index d4f0415894c7..901818c1b37c 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -383,6 +383,12 @@ config DMA_SUN6I help Support for the DMA engine for Allwinner A31 SoCs. +config NBPFAXI_DMA + tristate "Renesas Type-AXI NBPF DMA support" + select DMA_ENGINE + help + Support for "Type-AXI" NBPF DMA IPs from Renesas + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index d08bd966da13..f2b831a994cf 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -47,4 +47,5 @@ obj-$(CONFIG_MOXART_DMA) += moxart-dma.o obj-$(CONFIG_FSL_EDMA) += fsl-edma.o obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o obj-y += xilinx/ +obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c new file mode 100644 index 000000000000..77c5a890a30a --- /dev/null +++ b/drivers/dma/nbpfaxi.c @@ -0,0 +1,1511 @@ +/* + * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. + * Author: Guennadi Liakhovetski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "dmaengine.h" + +#define NBPF_REG_CHAN_OFFSET 0 +#define NBPF_REG_CHAN_SIZE 0x40 + +/* Channel Current Transaction Byte register */ +#define NBPF_CHAN_CUR_TR_BYTE 0x20 + +/* Channel Status register */ +#define NBPF_CHAN_STAT 0x24 +#define NBPF_CHAN_STAT_EN 1 +#define NBPF_CHAN_STAT_TACT 4 +#define NBPF_CHAN_STAT_ERR 0x10 +#define NBPF_CHAN_STAT_END 0x20 +#define NBPF_CHAN_STAT_TC 0x40 +#define NBPF_CHAN_STAT_DER 0x400 + +/* Channel Control register */ +#define NBPF_CHAN_CTRL 0x28 +#define NBPF_CHAN_CTRL_SETEN 1 +#define NBPF_CHAN_CTRL_CLREN 2 +#define NBPF_CHAN_CTRL_STG 4 +#define NBPF_CHAN_CTRL_SWRST 8 +#define NBPF_CHAN_CTRL_CLRRQ 0x10 +#define NBPF_CHAN_CTRL_CLREND 0x20 +#define NBPF_CHAN_CTRL_CLRTC 0x40 +#define NBPF_CHAN_CTRL_SETSUS 0x100 +#define NBPF_CHAN_CTRL_CLRSUS 0x200 + +/* Channel Configuration register */ +#define NBPF_CHAN_CFG 0x2c +#define NBPF_CHAN_CFG_SEL 7 /* terminal SELect: 0..7 */ +#define NBPF_CHAN_CFG_REQD 8 /* REQuest Direction: DMAREQ is 0: input, 1: output */ +#define NBPF_CHAN_CFG_LOEN 0x10 /* LOw ENable: low DMA request line is: 0: inactive, 1: active */ +#define NBPF_CHAN_CFG_HIEN 0x20 /* HIgh ENable: high DMA request line is: 0: inactive, 1: active */ +#define NBPF_CHAN_CFG_LVL 0x40 /* LeVeL: DMA request line is sensed as 0: edge, 1: level */ +#define NBPF_CHAN_CFG_AM 0x700 /* ACK Mode: 0: Pulse mode, 1: Level mode, b'1x: Bus Cycle */ +#define NBPF_CHAN_CFG_SDS 0xf000 /* Source Data Size: 0: 8 bits,... , 7: 1024 bits */ +#define NBPF_CHAN_CFG_DDS 0xf0000 /* Destination Data Size: as above */ +#define NBPF_CHAN_CFG_SAD 0x100000 /* Source ADdress counting: 0: increment, 1: fixed */ +#define NBPF_CHAN_CFG_DAD 0x200000 /* Destination ADdress counting: 0: increment, 1: fixed */ +#define NBPF_CHAN_CFG_TM 0x400000 /* Transfer Mode: 0: single, 1: block TM */ +#define NBPF_CHAN_CFG_DEM 0x1000000 /* DMAEND interrupt Mask */ +#define NBPF_CHAN_CFG_TCM 0x2000000 /* DMATCO interrupt Mask */ +#define NBPF_CHAN_CFG_SBE 0x8000000 /* Sweep Buffer Enable */ +#define NBPF_CHAN_CFG_RSEL 0x10000000 /* RM: Register Set sELect */ +#define NBPF_CHAN_CFG_RSW 0x20000000 /* RM: Register Select sWitch */ +#define NBPF_CHAN_CFG_REN 0x40000000 /* RM: Register Set Enable */ +#define NBPF_CHAN_CFG_DMS 0x80000000 /* 0: register mode (RM), 1: link mode (LM) */ + +#define NBPF_CHAN_NXLA 0x38 +#define NBPF_CHAN_CRLA 0x3c + +/* Link Header field */ +#define NBPF_HEADER_LV 1 +#define NBPF_HEADER_LE 2 +#define NBPF_HEADER_WBD 4 +#define NBPF_HEADER_DIM 8 + +#define NBPF_CTRL 0x300 +#define NBPF_CTRL_PR 1 /* 0: fixed priority, 1: round robin */ +#define NBPF_CTRL_LVINT 2 /* DMAEND and DMAERR signalling: 0: pulse, 1: level */ + +#define NBPF_DSTAT_ER 0x314 +#define NBPF_DSTAT_END 0x318 + +#define NBPF_DMA_BUSWIDTHS \ + (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) + +struct nbpf_config { + int num_channels; + int buffer_size; +}; + +/* + * We've got 3 types of objects, used to describe DMA transfers: + * 1. high-level descriptor, containing a struct dma_async_tx_descriptor object + * in it, used to communicate with the user + * 2. 
hardware DMA link descriptors, that we pass to DMAC for DMA transfer + * queuing, these must be DMAable, using either the streaming DMA API or + * allocated from coherent memory - one per SG segment + * 3. one per SG segment descriptors, used to manage HW link descriptors from + * (2). They do not have to be DMAable. They can either be (a) allocated + * together with link descriptors as mixed (DMA / CPU) objects, or (b) + * separately. Even if allocated separately it would be best to link them + * to link descriptors once during channel resource allocation and always + * use them as a single object. + * Therefore for both cases (a) and (b) at run-time objects (2) and (3) shall be + * treated as a single SG segment descriptor. + */ + +struct nbpf_link_reg { + u32 header; + u32 src_addr; + u32 dst_addr; + u32 transaction_size; + u32 config; + u32 interval; + u32 extension; + u32 next; +} __packed; + +struct nbpf_device; +struct nbpf_channel; +struct nbpf_desc; + +struct nbpf_link_desc { + struct nbpf_link_reg *hwdesc; + dma_addr_t hwdesc_dma_addr; + struct nbpf_desc *desc; + struct list_head node; +}; + +/** + * struct nbpf_desc - DMA transfer descriptor + * @async_tx: dmaengine object + * @user_wait: waiting for a user ack + * @length: total transfer length + * @sg: list of hardware descriptors, represented by struct nbpf_link_desc + * @node: member in channel descriptor lists + */ +struct nbpf_desc { + struct dma_async_tx_descriptor async_tx; + bool user_wait; + size_t length; + struct nbpf_channel *chan; + struct list_head sg; + struct list_head node; +}; + +/* Take a wild guess: allocate 4 segments per descriptor */ +#define NBPF_SEGMENTS_PER_DESC 4 +#define NBPF_DESCS_PER_PAGE ((PAGE_SIZE - sizeof(struct list_head)) / \ + (sizeof(struct nbpf_desc) + \ + NBPF_SEGMENTS_PER_DESC * \ + (sizeof(struct nbpf_link_desc) + sizeof(struct nbpf_link_reg)))) +#define NBPF_SEGMENTS_PER_PAGE (NBPF_SEGMENTS_PER_DESC * NBPF_DESCS_PER_PAGE) + +struct nbpf_desc_page { + struct list_head node; + struct nbpf_desc desc[NBPF_DESCS_PER_PAGE]; + struct nbpf_link_desc ldesc[NBPF_SEGMENTS_PER_PAGE]; + struct nbpf_link_reg hwdesc[NBPF_SEGMENTS_PER_PAGE]; +}; + +/** + * struct nbpf_channel - one DMAC channel + * @dma_chan: standard dmaengine channel object + * @base: register address base + * @nbpf: DMAC + * @name: IRQ name + * @irq: IRQ number + * @slave_addr: address for slave DMA + * @slave_width:slave data size in bytes + * @slave_burst:maximum slave burst size in bytes + * @terminal: DMA terminal, assigned to this channel + * @dmarq_cfg: DMA request line configuration - high / low, edge / level for NBPF_CHAN_CFG + * @flags: configuration flags from DT + * @lock: protect descriptor lists + * @free_links: list of free link descriptors + * @free: list of free descriptors + * @queued: list of queued descriptors + * @active: list of descriptors, scheduled for processing + * @done: list of completed descriptors, waiting post-processing + * @desc_page: list of additionally allocated descriptor pages - if any + */ +struct nbpf_channel { + struct dma_chan dma_chan; + void __iomem *base; + struct nbpf_device *nbpf; + char name[16]; + int irq; + dma_addr_t slave_src_addr; + size_t slave_src_width; + size_t slave_src_burst; + dma_addr_t slave_dst_addr; + size_t slave_dst_width; + size_t slave_dst_burst; + unsigned int terminal; + u32 dmarq_cfg; + unsigned long flags; + spinlock_t lock; + struct list_head free_links; + struct list_head free; + struct list_head queued; + struct list_head active; + struct list_head done; + 
struct list_head desc_page; + struct nbpf_desc *running; + bool paused; +}; + +struct nbpf_device { + struct dma_device dma_dev; + void __iomem *base; + struct clk *clk; + const struct nbpf_config *config; + struct nbpf_channel chan[]; +}; + +enum nbpf_model { + NBPF1B4, + NBPF1B8, + NBPF1B16, + NBPF4B4, + NBPF4B8, + NBPF4B16, + NBPF8B4, + NBPF8B8, + NBPF8B16, +}; + +static struct nbpf_config nbpf_cfg[] = { + [NBPF1B4] = { + .num_channels = 1, + .buffer_size = 4, + }, + [NBPF1B8] = { + .num_channels = 1, + .buffer_size = 8, + }, + [NBPF1B16] = { + .num_channels = 1, + .buffer_size = 16, + }, + [NBPF4B4] = { + .num_channels = 4, + .buffer_size = 4, + }, + [NBPF4B8] = { + .num_channels = 4, + .buffer_size = 8, + }, + [NBPF4B16] = { + .num_channels = 4, + .buffer_size = 16, + }, + [NBPF8B4] = { + .num_channels = 8, + .buffer_size = 4, + }, + [NBPF8B8] = { + .num_channels = 8, + .buffer_size = 8, + }, + [NBPF8B16] = { + .num_channels = 8, + .buffer_size = 16, + }, +}; + +#define nbpf_to_chan(d) container_of(d, struct nbpf_channel, dma_chan) + +/* + * dmaengine drivers seem to have a lot in common and instead of sharing more + * code, they reimplement those common algorithms independently. In this driver + * we try to separate the hardware-specific part from the (largely) generic + * part. This improves code readability and makes it possible in the future to + * reuse the generic code in form of a helper library. That generic code should + * be suitable for various DMA controllers, using transfer descriptors in RAM + * and pushing one SG list at a time to the DMA controller. + */ + +/* Hardware-specific part */ + +static inline u32 nbpf_chan_read(struct nbpf_channel *chan, + unsigned int offset) +{ + u32 data = ioread32(chan->base + offset); + dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", + __func__, chan->base, offset, data); + return data; +} + +static inline void nbpf_chan_write(struct nbpf_channel *chan, + unsigned int offset, u32 data) +{ + iowrite32(data, chan->base + offset); + dev_dbg(chan->dma_chan.device->dev, "%s(0x%p + 0x%x) = 0x%x\n", + __func__, chan->base, offset, data); +} + +static inline u32 nbpf_read(struct nbpf_device *nbpf, + unsigned int offset) +{ + u32 data = ioread32(nbpf->base + offset); + dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n", + __func__, nbpf->base, offset, data); + return data; +} + +static inline void nbpf_write(struct nbpf_device *nbpf, + unsigned int offset, u32 data) +{ + iowrite32(data, nbpf->base + offset); + dev_dbg(nbpf->dma_dev.dev, "%s(0x%p + 0x%x) = 0x%x\n", + __func__, nbpf->base, offset, data); +} + +static void nbpf_chan_halt(struct nbpf_channel *chan) +{ + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); +} + +static bool nbpf_status_get(struct nbpf_channel *chan) +{ + u32 status = nbpf_read(chan->nbpf, NBPF_DSTAT_END); + + return status & BIT(chan - chan->nbpf->chan); +} + +static void nbpf_status_ack(struct nbpf_channel *chan) +{ + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREND); +} + +static u32 nbpf_error_get(struct nbpf_device *nbpf) +{ + return nbpf_read(nbpf, NBPF_DSTAT_ER); +} + +struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error) +{ + return nbpf->chan + __ffs(error); +} + +static void nbpf_error_clear(struct nbpf_channel *chan) +{ + u32 status; + int i; + + /* Stop the channel, make sure DMA has been aborted */ + nbpf_chan_halt(chan); + + for (i = 1000; i; i--) { + status = nbpf_chan_read(chan, NBPF_CHAN_STAT); + if (!(status & NBPF_CHAN_STAT_TACT)) + 
break; + cpu_relax(); + } + + if (!i) + dev_err(chan->dma_chan.device->dev, + "%s(): abort timeout, channel status 0x%x\n", __func__, status); + + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SWRST); +} + +static int nbpf_start(struct nbpf_desc *desc) +{ + struct nbpf_channel *chan = desc->chan; + struct nbpf_link_desc *ldesc = list_first_entry(&desc->sg, struct nbpf_link_desc, node); + + nbpf_chan_write(chan, NBPF_CHAN_NXLA, (u32)ldesc->hwdesc_dma_addr); + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETEN | NBPF_CHAN_CTRL_CLRSUS); + chan->paused = false; + + /* Software trigger MEMCPY - only MEMCPY uses the block mode */ + if (ldesc->hwdesc->config & NBPF_CHAN_CFG_TM) + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_STG); + + dev_dbg(chan->nbpf->dma_dev.dev, "%s(): next 0x%x, cur 0x%x\n", __func__, + nbpf_chan_read(chan, NBPF_CHAN_NXLA), nbpf_chan_read(chan, NBPF_CHAN_CRLA)); + + return 0; +} + +static void nbpf_chan_prepare(struct nbpf_channel *chan) +{ + chan->dmarq_cfg = (chan->flags & NBPF_SLAVE_RQ_HIGH ? NBPF_CHAN_CFG_HIEN : 0) | + (chan->flags & NBPF_SLAVE_RQ_LOW ? NBPF_CHAN_CFG_LOEN : 0) | + (chan->flags & NBPF_SLAVE_RQ_LEVEL ? + NBPF_CHAN_CFG_LVL | (NBPF_CHAN_CFG_AM & 0x200) : 0) | + chan->terminal; +} + +static void nbpf_chan_prepare_default(struct nbpf_channel *chan) +{ + /* Don't output DMAACK */ + chan->dmarq_cfg = NBPF_CHAN_CFG_AM & 0x400; + chan->terminal = 0; + chan->flags = 0; +} + +static void nbpf_chan_configure(struct nbpf_channel *chan) +{ + /* + * We assume, that only the link mode and DMA request line configuration + * have to be set in the configuration register manually. Dynamic + * per-transfer configuration will be loaded from transfer descriptors. + */ + nbpf_chan_write(chan, NBPF_CHAN_CFG, NBPF_CHAN_CFG_DMS | chan->dmarq_cfg); +} + +static u32 nbpf_xfer_ds(struct nbpf_device *nbpf, size_t size) +{ + /* Maximum supported bursts depend on the buffer size */ + return min_t(int, __ffs(size), ilog2(nbpf->config->buffer_size * 8)); +} + +static size_t nbpf_xfer_size(struct nbpf_device *nbpf, + enum dma_slave_buswidth width, u32 burst) +{ + size_t size; + + if (!burst) + burst = 1; + + switch (width) { + case DMA_SLAVE_BUSWIDTH_8_BYTES: + size = 8 * burst; + break; + + case DMA_SLAVE_BUSWIDTH_4_BYTES: + size = 4 * burst; + break; + + case DMA_SLAVE_BUSWIDTH_2_BYTES: + size = 2 * burst; + break; + + default: + pr_warn("%s(): invalid bus width %u\n", __func__, width); + case DMA_SLAVE_BUSWIDTH_1_BYTE: + size = burst; + } + + return nbpf_xfer_ds(nbpf, size); +} + +/* + * We need a way to recognise slaves, whose data is sent "raw" over the bus, + * i.e. it isn't known in advance how many bytes will be received. Therefore + * the slave driver has to provide a "large enough" buffer and either read the + * buffer, when it is full, or detect, that some data has arrived, then wait for + * a timeout, if no more data arrives - receive what's already there. We want to + * handle such slaves in a special way to allow an optimised mode for other + * users, for whom the amount of data is known in advance. So far there's no way + * to recognise such slaves. We use a data-width check to distinguish between + * the SD host and the PL011 UART. 
+ */ + +static int nbpf_prep_one(struct nbpf_link_desc *ldesc, + enum dma_transfer_direction direction, + dma_addr_t src, dma_addr_t dst, size_t size, bool last) +{ + struct nbpf_link_reg *hwdesc = ldesc->hwdesc; + struct nbpf_desc *desc = ldesc->desc; + struct nbpf_channel *chan = desc->chan; + struct device *dev = chan->dma_chan.device->dev; + size_t mem_xfer, slave_xfer; + bool can_burst; + + hwdesc->header = NBPF_HEADER_WBD | NBPF_HEADER_LV | + (last ? NBPF_HEADER_LE : 0); + + hwdesc->src_addr = src; + hwdesc->dst_addr = dst; + hwdesc->transaction_size = size; + + /* + * set config: SAD, DAD, DDS, SDS, etc. + * Note on transfer sizes: the DMAC can perform unaligned DMA transfers, + * but it is important to have transaction size a multiple of both + * receiver and transmitter transfer sizes. It is also possible to use + * different RAM and device transfer sizes, and it does work well with + * some devices, e.g. with V08R07S01E SD host controllers, which can use + * 128 byte transfers. But this doesn't work with other devices, + * especially when the transaction size is unknown. This is the case, + * e.g. with serial drivers like amba-pl011.c. For reception it sets up + * the transaction size of 4K and if fewer bytes are received, it + * pauses DMA and reads out data received via DMA as well as those left + * in the Rx FIFO. For this to work with the RAM side using burst + * transfers we enable the SBE bit and terminate the transfer in our + * DMA_PAUSE handler. + */ + mem_xfer = nbpf_xfer_ds(chan->nbpf, size); + + switch (direction) { + case DMA_DEV_TO_MEM: + can_burst = chan->slave_src_width >= 3; + slave_xfer = min(mem_xfer, can_burst ? + chan->slave_src_burst : chan->slave_src_width); + /* + * Is the slave narrower than 64 bits, i.e. isn't using the full + * bus width and cannot use bursts? + */ + if (mem_xfer > chan->slave_src_burst && !can_burst) + mem_xfer = chan->slave_src_burst; + /* Device-to-RAM DMA is unreliable without REQD set */ + hwdesc->config = NBPF_CHAN_CFG_SAD | (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)) | + (NBPF_CHAN_CFG_SDS & (slave_xfer << 12)) | NBPF_CHAN_CFG_REQD | + NBPF_CHAN_CFG_SBE; + break; + + case DMA_MEM_TO_DEV: + slave_xfer = min(mem_xfer, chan->slave_dst_width >= 3 ? + chan->slave_dst_burst : chan->slave_dst_width); + hwdesc->config = NBPF_CHAN_CFG_DAD | (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) | + (NBPF_CHAN_CFG_DDS & (slave_xfer << 16)) | NBPF_CHAN_CFG_REQD; + break; + + case DMA_MEM_TO_MEM: + hwdesc->config = NBPF_CHAN_CFG_TCM | NBPF_CHAN_CFG_TM | + (NBPF_CHAN_CFG_SDS & (mem_xfer << 12)) | + (NBPF_CHAN_CFG_DDS & (mem_xfer << 16)); + break; + + default: + return -EINVAL; + } + + hwdesc->config |= chan->dmarq_cfg | (last ? 
0 : NBPF_CHAN_CFG_DEM) | + NBPF_CHAN_CFG_DMS; + + dev_dbg(dev, "%s(): desc @ %pad: hdr 0x%x, cfg 0x%x, %zu @ %pad -> %pad\n", + __func__, &ldesc->hwdesc_dma_addr, hwdesc->header, + hwdesc->config, size, &src, &dst); + + dma_sync_single_for_device(dev, ldesc->hwdesc_dma_addr, sizeof(*hwdesc), + DMA_TO_DEVICE); + + return 0; +} + +static size_t nbpf_bytes_left(struct nbpf_channel *chan) +{ + return nbpf_chan_read(chan, NBPF_CHAN_CUR_TR_BYTE); +} + +static void nbpf_configure(struct nbpf_device *nbpf) +{ + nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT); +} + +static void nbpf_pause(struct nbpf_channel *chan) +{ + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS); + /* See comment in nbpf_prep_one() */ + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); +} + +/* Generic part */ + +/* DMA ENGINE functions */ +static void nbpf_issue_pending(struct dma_chan *dchan) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + unsigned long flags; + + dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); + + spin_lock_irqsave(&chan->lock, flags); + if (list_empty(&chan->queued)) + goto unlock; + + list_splice_tail_init(&chan->queued, &chan->active); + + if (!chan->running) { + struct nbpf_desc *desc = list_first_entry(&chan->active, + struct nbpf_desc, node); + if (!nbpf_start(desc)) + chan->running = desc; + } + +unlock: + spin_unlock_irqrestore(&chan->lock, flags); +} + +static enum dma_status nbpf_tx_status(struct dma_chan *dchan, + dma_cookie_t cookie, struct dma_tx_state *state) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + enum dma_status status = dma_cookie_status(dchan, cookie, state); + + if (state) { + dma_cookie_t running; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + running = chan->running ? chan->running->async_tx.cookie : -EINVAL; + + if (cookie == running) { + state->residue = nbpf_bytes_left(chan); + dev_dbg(dchan->device->dev, "%s(): residue %u\n", __func__, + state->residue); + } else if (status == DMA_IN_PROGRESS) { + struct nbpf_desc *desc; + bool found = false; + + list_for_each_entry(desc, &chan->active, node) + if (desc->async_tx.cookie == cookie) { + found = true; + break; + } + + if (!found) + list_for_each_entry(desc, &chan->queued, node) + if (desc->async_tx.cookie == cookie) { + found = true; + break; + + } + + state->residue = found ? 
desc->length : 0; + } + + spin_unlock_irqrestore(&chan->lock, flags); + } + + if (chan->paused) + status = DMA_PAUSED; + + return status; +} + +static dma_cookie_t nbpf_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct nbpf_desc *desc = container_of(tx, struct nbpf_desc, async_tx); + struct nbpf_channel *chan = desc->chan; + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&chan->lock, flags); + cookie = dma_cookie_assign(tx); + list_add_tail(&desc->node, &chan->queued); + spin_unlock_irqrestore(&chan->lock, flags); + + dev_dbg(chan->dma_chan.device->dev, "Entry %s(%d)\n", __func__, cookie); + + return cookie; +} + +static int nbpf_desc_page_alloc(struct nbpf_channel *chan) +{ + struct dma_chan *dchan = &chan->dma_chan; + struct nbpf_desc_page *dpage = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + struct nbpf_link_desc *ldesc; + struct nbpf_link_reg *hwdesc; + struct nbpf_desc *desc; + LIST_HEAD(head); + LIST_HEAD(lhead); + int i; + struct device *dev = dchan->device->dev; + + if (!dpage) + return -ENOMEM; + + dev_dbg(dev, "%s(): alloc %lu descriptors, %lu segments, total alloc %zu\n", + __func__, NBPF_DESCS_PER_PAGE, NBPF_SEGMENTS_PER_PAGE, sizeof(*dpage)); + + for (i = 0, ldesc = dpage->ldesc, hwdesc = dpage->hwdesc; + i < ARRAY_SIZE(dpage->ldesc); + i++, ldesc++, hwdesc++) { + ldesc->hwdesc = hwdesc; + list_add_tail(&ldesc->node, &lhead); + ldesc->hwdesc_dma_addr = dma_map_single(dchan->device->dev, + hwdesc, sizeof(*hwdesc), DMA_TO_DEVICE); + + dev_dbg(dev, "%s(): mapped 0x%p to %pad\n", __func__, + hwdesc, &ldesc->hwdesc_dma_addr); + } + + for (i = 0, desc = dpage->desc; + i < ARRAY_SIZE(dpage->desc); + i++, desc++) { + dma_async_tx_descriptor_init(&desc->async_tx, dchan); + desc->async_tx.tx_submit = nbpf_tx_submit; + desc->chan = chan; + INIT_LIST_HEAD(&desc->sg); + list_add_tail(&desc->node, &head); + } + + /* + * This function cannot be called from interrupt context, so, no need to + * save flags + */ + spin_lock_irq(&chan->lock); + list_splice_tail(&lhead, &chan->free_links); + list_splice_tail(&head, &chan->free); + list_add(&dpage->node, &chan->desc_page); + spin_unlock_irq(&chan->lock); + + return ARRAY_SIZE(dpage->desc); +} + +static void nbpf_desc_put(struct nbpf_desc *desc) +{ + struct nbpf_channel *chan = desc->chan; + struct nbpf_link_desc *ldesc, *tmp; + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); + list_for_each_entry_safe(ldesc, tmp, &desc->sg, node) + list_move(&ldesc->node, &chan->free_links); + + list_add(&desc->node, &chan->free); + spin_unlock_irqrestore(&chan->lock, flags); +} + +static void nbpf_scan_acked(struct nbpf_channel *chan) +{ + struct nbpf_desc *desc, *tmp; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->lock, flags); + list_for_each_entry_safe(desc, tmp, &chan->done, node) + if (async_tx_test_ack(&desc->async_tx) && desc->user_wait) { + list_move(&desc->node, &head); + desc->user_wait = false; + } + spin_unlock_irqrestore(&chan->lock, flags); + + list_for_each_entry_safe(desc, tmp, &head, node) { + list_del(&desc->node); + nbpf_desc_put(desc); + } +} + +/* + * We have to allocate descriptors with the channel lock dropped. This means, + * before we re-acquire the lock buffers can be taken already, so we have to + * re-check after re-acquiring the lock and possibly retry, if buffers are gone + * again. 
+ */ +static struct nbpf_desc *nbpf_desc_get(struct nbpf_channel *chan, size_t len) +{ + struct nbpf_desc *desc = NULL; + struct nbpf_link_desc *ldesc, *prev = NULL; + + nbpf_scan_acked(chan); + + spin_lock_irq(&chan->lock); + + do { + int i = 0, ret; + + if (list_empty(&chan->free)) { + /* No more free descriptors */ + spin_unlock_irq(&chan->lock); + ret = nbpf_desc_page_alloc(chan); + if (ret < 0) + return NULL; + spin_lock_irq(&chan->lock); + continue; + } + desc = list_first_entry(&chan->free, struct nbpf_desc, node); + list_del(&desc->node); + + do { + if (list_empty(&chan->free_links)) { + /* No more free link descriptors */ + spin_unlock_irq(&chan->lock); + ret = nbpf_desc_page_alloc(chan); + if (ret < 0) { + nbpf_desc_put(desc); + return NULL; + } + spin_lock_irq(&chan->lock); + continue; + } + + ldesc = list_first_entry(&chan->free_links, + struct nbpf_link_desc, node); + ldesc->desc = desc; + if (prev) + prev->hwdesc->next = (u32)ldesc->hwdesc_dma_addr; + + prev = ldesc; + list_move_tail(&ldesc->node, &desc->sg); + + i++; + } while (i < len); + } while (!desc); + + prev->hwdesc->next = 0; + + spin_unlock_irq(&chan->lock); + + return desc; +} + +static void nbpf_chan_idle(struct nbpf_channel *chan) +{ + struct nbpf_desc *desc, *tmp; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->lock, flags); + + list_splice_init(&chan->done, &head); + list_splice_init(&chan->active, &head); + list_splice_init(&chan->queued, &head); + + chan->running = NULL; + + spin_unlock_irqrestore(&chan->lock, flags); + + list_for_each_entry_safe(desc, tmp, &head, node) { + dev_dbg(chan->nbpf->dma_dev.dev, "%s(): force-free desc %p cookie %d\n", + __func__, desc, desc->async_tx.cookie); + list_del(&desc->node); + nbpf_desc_put(desc); + } +} + +static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + struct dma_slave_config *config; + + dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd); + + switch (cmd) { + case DMA_TERMINATE_ALL: + dev_dbg(dchan->device->dev, "Terminating\n"); + nbpf_chan_halt(chan); + nbpf_chan_idle(chan); + break; + + case DMA_SLAVE_CONFIG: + if (!arg) + return -EINVAL; + config = (struct dma_slave_config *)arg; + + /* + * We could check config->slave_id to match chan->terminal here, + * but with DT they would be coming from the same source, so + * such a check would be superflous + */ + + chan->slave_dst_addr = config->dst_addr; + chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, + config->dst_addr_width, 1); + chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf, + config->dst_addr_width, + config->dst_maxburst); + chan->slave_src_addr = config->src_addr; + chan->slave_src_width = nbpf_xfer_size(chan->nbpf, + config->src_addr_width, 1); + chan->slave_src_burst = nbpf_xfer_size(chan->nbpf, + config->src_addr_width, + config->src_maxburst); + break; + + case DMA_PAUSE: + chan->paused = true; + nbpf_pause(chan); + break; + + default: + return -ENXIO; + } + + return 0; +} + +static struct dma_async_tx_descriptor *nbpf_prep_sg(struct nbpf_channel *chan, + struct scatterlist *src_sg, struct scatterlist *dst_sg, + size_t len, enum dma_transfer_direction direction, + unsigned long flags) +{ + struct nbpf_link_desc *ldesc; + struct scatterlist *mem_sg; + struct nbpf_desc *desc; + bool inc_src, inc_dst; + size_t data_len = 0; + int i = 0; + + switch (direction) { + case DMA_DEV_TO_MEM: + mem_sg = dst_sg; + inc_src = false; + inc_dst = true; + break; + + case DMA_MEM_TO_DEV: + 
mem_sg = src_sg; + inc_src = true; + inc_dst = false; + break; + + default: + case DMA_MEM_TO_MEM: + mem_sg = src_sg; + inc_src = true; + inc_dst = true; + } + + desc = nbpf_desc_get(chan, len); + if (!desc) + return NULL; + + desc->async_tx.flags = flags; + desc->async_tx.cookie = -EBUSY; + desc->user_wait = false; + + /* + * This is a private descriptor list, and we own the descriptor. No need + * to lock. + */ + list_for_each_entry(ldesc, &desc->sg, node) { + int ret = nbpf_prep_one(ldesc, direction, + sg_dma_address(src_sg), + sg_dma_address(dst_sg), + sg_dma_len(mem_sg), + i == len - 1); + if (ret < 0) { + nbpf_desc_put(desc); + return NULL; + } + data_len += sg_dma_len(mem_sg); + if (inc_src) + src_sg = sg_next(src_sg); + if (inc_dst) + dst_sg = sg_next(dst_sg); + mem_sg = direction == DMA_DEV_TO_MEM ? dst_sg : src_sg; + i++; + } + + desc->length = data_len; + + /* The user has to return the descriptor to us ASAP via .tx_submit() */ + return &desc->async_tx; +} + +static struct dma_async_tx_descriptor *nbpf_prep_memcpy( + struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + struct scatterlist dst_sg; + struct scatterlist src_sg; + + sg_init_table(&dst_sg, 1); + sg_init_table(&src_sg, 1); + + sg_dma_address(&dst_sg) = dst; + sg_dma_address(&src_sg) = src; + + sg_dma_len(&dst_sg) = len; + sg_dma_len(&src_sg) = len; + + dev_dbg(dchan->device->dev, "%s(): %zu @ %pad -> %pad\n", + __func__, len, &src, &dst); + + return nbpf_prep_sg(chan, &src_sg, &dst_sg, 1, + DMA_MEM_TO_MEM, flags); +} + +static struct dma_async_tx_descriptor *nbpf_prep_memcpy_sg( + struct dma_chan *dchan, + struct scatterlist *dst_sg, unsigned int dst_nents, + struct scatterlist *src_sg, unsigned int src_nents, + unsigned long flags) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + + if (dst_nents != src_nents) + return NULL; + + return nbpf_prep_sg(chan, src_sg, dst_sg, src_nents, + DMA_MEM_TO_MEM, flags); +} + +static struct dma_async_tx_descriptor *nbpf_prep_slave_sg( + struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, + enum dma_transfer_direction direction, unsigned long flags, void *context) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + struct scatterlist slave_sg; + + dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); + + sg_init_table(&slave_sg, 1); + + switch (direction) { + case DMA_MEM_TO_DEV: + sg_dma_address(&slave_sg) = chan->slave_dst_addr; + return nbpf_prep_sg(chan, sgl, &slave_sg, sg_len, + direction, flags); + + case DMA_DEV_TO_MEM: + sg_dma_address(&slave_sg) = chan->slave_src_addr; + return nbpf_prep_sg(chan, &slave_sg, sgl, sg_len, + direction, flags); + + default: + return NULL; + } +} + +static int nbpf_alloc_chan_resources(struct dma_chan *dchan) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + int ret; + + INIT_LIST_HEAD(&chan->free); + INIT_LIST_HEAD(&chan->free_links); + INIT_LIST_HEAD(&chan->queued); + INIT_LIST_HEAD(&chan->active); + INIT_LIST_HEAD(&chan->done); + + ret = nbpf_desc_page_alloc(chan); + if (ret < 0) + return ret; + + dev_dbg(dchan->device->dev, "Entry %s(): terminal %u\n", __func__, + chan->terminal); + + nbpf_chan_configure(chan); + + return ret; +} + +static void nbpf_free_chan_resources(struct dma_chan *dchan) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + struct nbpf_desc_page *dpage, *tmp; + + dev_dbg(dchan->device->dev, "Entry %s()\n", __func__); + + nbpf_chan_halt(chan); + /* Clean up for if a channel is re-used for 
MEMCPY after slave DMA */ + nbpf_chan_prepare_default(chan); + + list_for_each_entry_safe(dpage, tmp, &chan->desc_page, node) { + struct nbpf_link_desc *ldesc; + int i; + list_del(&dpage->node); + for (i = 0, ldesc = dpage->ldesc; + i < ARRAY_SIZE(dpage->ldesc); + i++, ldesc++) + dma_unmap_single(dchan->device->dev, ldesc->hwdesc_dma_addr, + sizeof(*ldesc->hwdesc), DMA_TO_DEVICE); + free_page((unsigned long)dpage); + } +} + +static int nbpf_slave_caps(struct dma_chan *dchan, + struct dma_slave_caps *caps) +{ + caps->src_addr_widths = NBPF_DMA_BUSWIDTHS; + caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS; + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + caps->cmd_pause = false; + caps->cmd_terminate = true; + + return 0; +} + +static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct nbpf_device *nbpf = ofdma->of_dma_data; + struct dma_chan *dchan; + struct nbpf_channel *chan; + + if (dma_spec->args_count != 2) + return NULL; + + dchan = dma_get_any_slave_channel(&nbpf->dma_dev); + if (!dchan) + return NULL; + + dev_dbg(dchan->device->dev, "Entry %s(%s)\n", __func__, + dma_spec->np->name); + + chan = nbpf_to_chan(dchan); + + chan->terminal = dma_spec->args[0]; + chan->flags = dma_spec->args[1]; + + nbpf_chan_prepare(chan); + nbpf_chan_configure(chan); + + return dchan; +} + +static irqreturn_t nbpf_chan_irqt(int irq, void *dev) +{ + struct nbpf_channel *chan = dev; + struct nbpf_desc *desc, *tmp; + dma_async_tx_callback callback; + void *param; + + while (!list_empty(&chan->done)) { + bool found = false, must_put, recycling = false; + + spin_lock_irq(&chan->lock); + + list_for_each_entry_safe(desc, tmp, &chan->done, node) { + if (!desc->user_wait) { + /* Newly completed descriptor, have to process */ + found = true; + break; + } else if (async_tx_test_ack(&desc->async_tx)) { + /* + * This descriptor was waiting for a user ACK, + * it can be recycled now. 
+				 */
+				list_del(&desc->node);
+				spin_unlock_irq(&chan->lock);
+				nbpf_desc_put(desc);
+				recycling = true;
+				break;
+			}
+		}
+
+		if (recycling)
+			continue;
+
+		if (!found) {
+			/* This can happen if TERMINATE_ALL has been called */
+			spin_unlock_irq(&chan->lock);
+			break;
+		}
+
+		dma_cookie_complete(&desc->async_tx);
+
+		/*
+		 * With released lock we cannot dereference desc, maybe it's
+		 * still on the "done" list
+		 */
+		if (async_tx_test_ack(&desc->async_tx)) {
+			list_del(&desc->node);
+			must_put = true;
+		} else {
+			desc->user_wait = true;
+			must_put = false;
+		}
+
+		callback = desc->async_tx.callback;
+		param = desc->async_tx.callback_param;
+
+		/* ack and callback completed descriptor */
+		spin_unlock_irq(&chan->lock);
+
+		if (callback)
+			callback(param);
+
+		if (must_put)
+			nbpf_desc_put(desc);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t nbpf_chan_irq(int irq, void *dev)
+{
+	struct nbpf_channel *chan = dev;
+	bool done = nbpf_status_get(chan);
+	struct nbpf_desc *desc;
+	irqreturn_t ret;
+
+	if (!done)
+		return IRQ_NONE;
+
+	nbpf_status_ack(chan);
+
+	dev_dbg(&chan->dma_chan.dev->device, "%s()\n", __func__);
+
+	spin_lock(&chan->lock);
+	desc = chan->running;
+	if (WARN_ON(!desc)) {
+		ret = IRQ_NONE;
+		goto unlock;
+	} else {
+		ret = IRQ_WAKE_THREAD;
+	}
+
+	list_move_tail(&desc->node, &chan->done);
+	chan->running = NULL;
+
+	if (!list_empty(&chan->active)) {
+		desc = list_first_entry(&chan->active,
+					struct nbpf_desc, node);
+		if (!nbpf_start(desc))
+			chan->running = desc;
+	}
+
+unlock:
+	spin_unlock(&chan->lock);
+
+	return ret;
+}
+
+static irqreturn_t nbpf_err_irq(int irq, void *dev)
+{
+	struct nbpf_device *nbpf = dev;
+	u32 error = nbpf_error_get(nbpf);
+
+	dev_warn(nbpf->dma_dev.dev, "DMA error IRQ %u\n", irq);
+
+	if (!error)
+		return IRQ_NONE;
+
+	do {
+		struct nbpf_channel *chan = nbpf_error_get_channel(nbpf, error);
+		/* On error: abort all queued transfers, no callback */
+		nbpf_error_clear(chan);
+		nbpf_chan_idle(chan);
+		error = nbpf_error_get(nbpf);
+	} while (error);
+
+	return IRQ_HANDLED;
+}
+
+static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
+{
+	struct dma_device *dma_dev = &nbpf->dma_dev;
+	struct nbpf_channel *chan = nbpf->chan + n;
+	int ret;
+
+	chan->nbpf = nbpf;
+	chan->base = nbpf->base + NBPF_REG_CHAN_OFFSET + NBPF_REG_CHAN_SIZE * n;
+	INIT_LIST_HEAD(&chan->desc_page);
+	spin_lock_init(&chan->lock);
+	chan->dma_chan.device = dma_dev;
+	dma_cookie_init(&chan->dma_chan);
+	nbpf_chan_prepare_default(chan);
+
+	dev_dbg(dma_dev->dev, "%s(): channel %d: -> %p\n", __func__, n, chan->base);
+
+	snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);
+
+	ret = devm_request_threaded_irq(dma_dev->dev, chan->irq,
+					nbpf_chan_irq, nbpf_chan_irqt, IRQF_SHARED,
+					chan->name, chan);
+	if (ret < 0)
+		return ret;
+
+	/* Add the channel to DMA device channel list */
+	list_add_tail(&chan->dma_chan.device_node,
+		      &dma_dev->channels);
+
+	return 0;
+}
+
+static const struct of_device_id nbpf_match[] = {
+	{.compatible = "renesas,nbpfaxi64dmac1b4", .data = &nbpf_cfg[NBPF1B4]},
+	{.compatible = "renesas,nbpfaxi64dmac1b8", .data = &nbpf_cfg[NBPF1B8]},
+	{.compatible = "renesas,nbpfaxi64dmac1b16", .data = &nbpf_cfg[NBPF1B16]},
+	{.compatible = "renesas,nbpfaxi64dmac4b4", .data = &nbpf_cfg[NBPF4B4]},
+	{.compatible = "renesas,nbpfaxi64dmac4b8", .data = &nbpf_cfg[NBPF4B8]},
+	{.compatible = "renesas,nbpfaxi64dmac4b16", .data = &nbpf_cfg[NBPF4B16]},
+	{.compatible = "renesas,nbpfaxi64dmac8b4", .data = &nbpf_cfg[NBPF8B4]},
+	{.compatible = "renesas,nbpfaxi64dmac8b8", .data = &nbpf_cfg[NBPF8B8]},
+	{.compatible = "renesas,nbpfaxi64dmac8b16", .data = &nbpf_cfg[NBPF8B16]},
+	{}
+};
+MODULE_DEVICE_TABLE(of, nbpf_match);
+
+static int nbpf_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *of_id = of_match_device(nbpf_match, dev);
+	struct device_node *np = dev->of_node;
+	struct nbpf_device *nbpf;
+	struct dma_device *dma_dev;
+	struct resource *iomem, *irq_res;
+	const struct nbpf_config *cfg;
+	int num_channels;
+	int ret, irq, eirq, i;
+	int irqbuf[9]; /* maximum 8 channels + error IRQ */
+	unsigned int irqs = 0;
+
+	BUILD_BUG_ON(sizeof(struct nbpf_desc_page) > PAGE_SIZE);
+
+	/* DT only */
+	if (!np || !of_id || !of_id->data)
+		return -ENODEV;
+
+	cfg = of_id->data;
+	num_channels = cfg->num_channels;
+
+	nbpf = devm_kzalloc(dev, sizeof(*nbpf) + num_channels *
+			    sizeof(nbpf->chan[0]), GFP_KERNEL);
+	if (!nbpf) {
+		dev_err(dev, "Memory allocation failed\n");
+		return -ENOMEM;
+	}
+	dma_dev = &nbpf->dma_dev;
+	dma_dev->dev = dev;
+
+	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	nbpf->base = devm_ioremap_resource(dev, iomem);
+	if (IS_ERR(nbpf->base))
+		return PTR_ERR(nbpf->base);
+
+	nbpf->clk = devm_clk_get(dev, NULL);
+	if (IS_ERR(nbpf->clk))
+		return PTR_ERR(nbpf->clk);
+
+	nbpf->config = cfg;
+
+	for (i = 0; irqs < ARRAY_SIZE(irqbuf); i++) {
+		irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
+		if (!irq_res)
+			break;
+
+		for (irq = irq_res->start; irq <= irq_res->end;
+		     irq++, irqs++)
+			irqbuf[irqs] = irq;
+	}
+
+	/*
+	 * 3 IRQ resource schemes are supported:
+	 * 1. 1 shared IRQ for error and all channels
+	 * 2. 2 IRQs: one for error and one shared for all channels
+	 * 3. 1 IRQ for error and an own IRQ for each channel
+	 */
+	if (irqs != 1 && irqs != 2 && irqs != num_channels + 1)
+		return -ENXIO;
+
+	if (irqs == 1) {
+		eirq = irqbuf[0];
+
+		for (i = 0; i <= num_channels; i++)
+			nbpf->chan[i].irq = irqbuf[0];
+	} else {
+		eirq = platform_get_irq_byname(pdev, "error");
+		if (eirq < 0)
+			return eirq;
+
+		if (irqs == num_channels + 1) {
+			struct nbpf_channel *chan;
+
+			for (i = 0, chan = nbpf->chan; i <= num_channels;
+			     i++, chan++) {
+				/* Skip the error IRQ */
+				if (irqbuf[i] == eirq)
+					i++;
+				chan->irq = irqbuf[i];
+			}
+
+			if (chan != nbpf->chan + num_channels)
+				return -EINVAL;
+		} else {
+			/* 2 IRQs and more than one channel */
+			if (irqbuf[0] == eirq)
+				irq = irqbuf[1];
+			else
+				irq = irqbuf[0];
+
+			for (i = 0; i <= num_channels; i++)
+				nbpf->chan[i].irq = irq;
+		}
+	}
+
+	ret = devm_request_irq(dev, eirq, nbpf_err_irq,
+			       IRQF_SHARED, "dma error", nbpf);
+	if (ret < 0)
+		return ret;
+
+	INIT_LIST_HEAD(&dma_dev->channels);
+
+	/* Create DMA Channel */
+	for (i = 0; i < num_channels; i++) {
+		ret = nbpf_chan_probe(nbpf, i);
+		if (ret < 0)
+			return ret;
+	}
+
+	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
+	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
+	dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);
+	dma_cap_set(DMA_SG, dma_dev->cap_mask);
+
+	/* Common and MEMCPY operations */
+	dma_dev->device_alloc_chan_resources
+		= nbpf_alloc_chan_resources;
+	dma_dev->device_free_chan_resources = nbpf_free_chan_resources;
+	dma_dev->device_prep_dma_sg = nbpf_prep_memcpy_sg;
+	dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy;
+	dma_dev->device_tx_status = nbpf_tx_status;
+	dma_dev->device_issue_pending = nbpf_issue_pending;
+	dma_dev->device_slave_caps = nbpf_slave_caps;
+
+	/*
+	 * If we drop support for unaligned MEMCPY buffer addresses and / or
+	 * lengths by setting
+	 * dma_dev->copy_align = 4;
+	 * then we can set transfer length to 4 bytes in nbpf_prep_one() for
+	 * DMA_MEM_TO_MEM
+	 */
+
+	/* Compulsory for DMA_SLAVE fields */
+	dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg;
+	dma_dev->device_control = nbpf_control;
+
+	platform_set_drvdata(pdev, nbpf);
+
+	ret = clk_prepare_enable(nbpf->clk);
+	if (ret < 0)
+		return ret;
+
+	nbpf_configure(nbpf);
+
+	ret = dma_async_device_register(dma_dev);
+	if (ret < 0)
+		goto e_clk_off;
+
+	ret = of_dma_controller_register(np, nbpf_of_xlate, nbpf);
+	if (ret < 0)
+		goto e_dma_dev_unreg;
+
+	return 0;
+
+e_dma_dev_unreg:
+	dma_async_device_unregister(dma_dev);
+e_clk_off:
+	clk_disable_unprepare(nbpf->clk);
+
+	return ret;
+}
+
+static int nbpf_remove(struct platform_device *pdev)
+{
+	struct nbpf_device *nbpf = platform_get_drvdata(pdev);
+
+	of_dma_controller_free(pdev->dev.of_node);
+	dma_async_device_unregister(&nbpf->dma_dev);
+	clk_disable_unprepare(nbpf->clk);
+
+	return 0;
+}
+
+static struct platform_device_id nbpf_ids[] = {
+	{"nbpfaxi64dmac1b4", (kernel_ulong_t)&nbpf_cfg[NBPF1B4]},
+	{"nbpfaxi64dmac1b8", (kernel_ulong_t)&nbpf_cfg[NBPF1B8]},
+	{"nbpfaxi64dmac1b16", (kernel_ulong_t)&nbpf_cfg[NBPF1B16]},
+	{"nbpfaxi64dmac4b4", (kernel_ulong_t)&nbpf_cfg[NBPF4B4]},
+	{"nbpfaxi64dmac4b8", (kernel_ulong_t)&nbpf_cfg[NBPF4B8]},
+	{"nbpfaxi64dmac4b16", (kernel_ulong_t)&nbpf_cfg[NBPF4B16]},
+	{"nbpfaxi64dmac8b4", (kernel_ulong_t)&nbpf_cfg[NBPF8B4]},
+	{"nbpfaxi64dmac8b8", (kernel_ulong_t)&nbpf_cfg[NBPF8B8]},
+	{"nbpfaxi64dmac8b16", (kernel_ulong_t)&nbpf_cfg[NBPF8B16]},
+	{},
+};
+MODULE_DEVICE_TABLE(platform, nbpf_ids);
+
+#ifdef CONFIG_PM_RUNTIME
+static int nbpf_runtime_suspend(struct device *dev)
+{
+	struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+	clk_disable_unprepare(nbpf->clk);
+	return 0;
+}
+
+static int nbpf_runtime_resume(struct device *dev)
+{
+	struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+	return clk_prepare_enable(nbpf->clk);
+}
+#endif
+
+static const struct dev_pm_ops nbpf_pm_ops = {
+	SET_RUNTIME_PM_OPS(nbpf_runtime_suspend, nbpf_runtime_resume, NULL)
+};
+
+static struct platform_driver nbpf_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = "dma-nbpf",
+		.of_match_table = nbpf_match,
+		.pm = &nbpf_pm_ops,
+	},
+	.id_table = nbpf_ids,
+	.probe = nbpf_probe,
+	.remove = nbpf_remove,
+};
+
+module_platform_driver(nbpf_driver);
+
+MODULE_AUTHOR("Guennadi Liakhovetski ");
+MODULE_DESCRIPTION("dmaengine driver for NBPFAXI64* DMACs");
+MODULE_LICENSE("GPL v2");
-- cgit v1.2.3-58-ga151

From 67b166847009b009cd9fbcdef6b71558f49e1bd6 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Sun, 3 Aug 2014 19:13:03 +0200
Subject: dmaengine: nbpfaxi: fix a theoretical race

A race possibility exists if a DMA slave driver tries to free channel
resources without waiting for all transfers to complete and without
explicitly terminating all requests. In such a case the IRQ processing
thread can race with .device_free_chan_resources(). To fix this race,
empty all descriptor lists before freeing the descriptor cache.
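
For illustration, the teardown order in .device_free_chan_resources()
then becomes (a sketch, not part of the patch; the actual one-line
change is in the diff below):

	nbpf_chan_halt(chan);		/* stop the hardware channel */
	nbpf_chan_idle(chan);		/* empty the free/queued/active/done
					 * lists, so the IRQ processing thread
					 * can no longer find a descriptor */
	nbpf_chan_prepare_default(chan);
	/* only after this is it safe to free the descriptor page cache */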
Signed-off-by: Guennadi Liakhovetski
Signed-off-by: Vinod Koul
---
 drivers/dma/nbpfaxi.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers')

diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 77c5a890a30a..5b40ac8d82d1 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1054,6 +1054,7 @@ static void nbpf_free_chan_resources(struct dma_chan *dchan)
 	dev_dbg(dchan->device->dev, "Entry %s()\n", __func__);
 
 	nbpf_chan_halt(chan);
+	nbpf_chan_idle(chan);
 	/* Clean up for if a channel is re-used for MEMCPY after slave DMA */
 	nbpf_chan_prepare_default(chan);
 
-- cgit v1.2.3-58-ga151

From f02323ec68328c3db3e3c23490cd80f9ce9c00fa Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Sun, 3 Aug 2014 19:13:07 +0200
Subject: dmaengine: nbpfaxi: convert to tasklet

It is common among dmaengine drivers to use a tasklet for bottom half
interrupt processing. Convert nbpfaxi to do the same.

Signed-off-by: Guennadi Liakhovetski
Signed-off-by: Vinod Koul
---
 drivers/dma/nbpfaxi.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

(limited to 'drivers')

diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index 5b40ac8d82d1..cea6a3768e2e 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -197,6 +197,7 @@ struct nbpf_desc_page {
  */
 struct nbpf_channel {
 	struct dma_chan dma_chan;
+	struct tasklet_struct tasklet;
 	void __iomem *base;
 	struct nbpf_device *nbpf;
 	char name[16];
@@ -1111,9 +1112,9 @@ static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec,
 	return dchan;
 }
 
-static irqreturn_t nbpf_chan_irqt(int irq, void *dev)
+static void nbpf_chan_tasklet(unsigned long data)
 {
-	struct nbpf_channel *chan = dev;
+	struct nbpf_channel *chan = (struct nbpf_channel *)data;
 	struct nbpf_desc *desc, *tmp;
 	dma_async_tx_callback callback;
 	void *param;
@@ -1176,8 +1177,6 @@ static irqreturn_t nbpf_chan_irq(int irq, void *dev)
 		if (must_put)
 			nbpf_desc_put(desc);
 	}
-
-	return IRQ_HANDLED;
 }
 
 static irqreturn_t nbpf_chan_irq(int irq, void *dev)
@@ -1186,6 +1185,7 @@ static irqreturn_t nbpf_chan_irq(int irq, void *dev)
 	bool done = nbpf_status_get(chan);
 	struct nbpf_desc *desc;
 	irqreturn_t ret;
+	bool bh = false;
 
 	if (!done)
 		return IRQ_NONE;
@@ -1200,7 +1200,8 @@ static irqreturn_t nbpf_chan_irq(int irq, void *dev)
 		ret = IRQ_NONE;
 		goto unlock;
 	} else {
-		ret = IRQ_WAKE_THREAD;
+		ret = IRQ_HANDLED;
+		bh = true;
 	}
 
 	list_move_tail(&desc->node, &chan->done);
@@ -1216,6 +1217,9 @@ static irqreturn_t nbpf_chan_irq(int irq, void *dev)
 unlock:
 	spin_unlock(&chan->lock);
 
+	if (bh)
+		tasklet_schedule(&chan->tasklet);
+
 	return ret;
 }
 
@@ -1258,8 +1262,9 @@ static int nbpf_chan_probe(struct nbpf_device *nbpf, int n)
 
 	snprintf(chan->name, sizeof(chan->name), "nbpf %d", n);
 
-	ret = devm_request_threaded_irq(dma_dev->dev, chan->irq,
-			nbpf_chan_irq, nbpf_chan_irqt, IRQF_SHARED,
+	tasklet_init(&chan->tasklet, nbpf_chan_tasklet, (unsigned long)chan);
+	ret = devm_request_irq(dma_dev->dev, chan->irq,
+			nbpf_chan_irq, IRQF_SHARED,
 			chan->name, chan);
 	if (ret < 0)
 		return ret;
-- cgit v1.2.3-58-ga151

From 31c1e5a1350ae8d1bc2018f5de8264266d9773e1 Mon Sep 17 00:00:00 2001
From: Laurent Pinchart
Date: Fri, 1 Aug 2014 12:20:10 +0200
Subject: dmaengine: Remove the context argument to the prep_dma_cyclic operation

The argument is always set to NULL and never used. Remove it.
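
For a driver implementing the callback the conversion is mechanical.
Sketched on a hypothetical foo_dma driver (not one of the drivers
touched below):

	static struct dma_async_tx_descriptor *foo_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)	/* trailing 'void *context' dropped */
	{
		/* driver-specific descriptor setup, unchanged */
	}

Clients are unaffected: they keep calling dmaengine_prep_dma_cyclic(),
which used to pass a hard-coded NULL for the context and now simply
omits it.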
Signed-off-by: Laurent Pinchart
Signed-off-by: Vinod Koul
---
 drivers/dma/amba-pl08x.c      | 2 +-
 drivers/dma/at_hdmac.c        | 3 +--
 drivers/dma/bcm2835-dma.c     | 2 +-
 drivers/dma/dma-jz4740.c      | 2 +-
 drivers/dma/edma.c            | 2 +-
 drivers/dma/ep93xx_dma.c      | 4 +---
 drivers/dma/fsl-edma.c        | 2 +-
 drivers/dma/imx-dma.c         | 2 +-
 drivers/dma/imx-sdma.c        | 2 +-
 drivers/dma/mmp_pdma.c        | 2 +-
 drivers/dma/mmp_tdma.c        | 2 +-
 drivers/dma/mxs-dma.c         | 2 +-
 drivers/dma/omap-dma.c        | 3 +--
 drivers/dma/pl330.c           | 2 +-
 drivers/dma/s3c24xx-dma.c     | 3 +--
 drivers/dma/sa11x0-dma.c      | 2 +-
 drivers/dma/sh/shdma-base.c   | 2 +-
 drivers/dma/sirf-dma.c        | 2 +-
 drivers/dma/ste_dma40.c       | 3 +--
 drivers/dma/tegra20-apb-dma.c | 2 +-
 include/linux/dmaengine.h     | 4 ++--
 21 files changed, 22 insertions(+), 28 deletions(-)

(limited to 'drivers')

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 8114731a1c62..48ec81fe1eac 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1653,7 +1653,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
 static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
+		unsigned long flags)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
 	struct pl08x_driver_data *pl08x = plchan->host;
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index c13a3bb0f594..d20ab1b73a3a 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -893,12 +893,11 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
  * @period_len: number of bytes for each period
  * @direction: transfer direction, to or from device
  * @flags: tx descriptor status flags
- * @context: transfer context (ignored)
  */
 static struct dma_async_tx_descriptor *
 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
+		unsigned long flags)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
 	struct at_dma_slave *atslave = chan->private;
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index a03602164e3e..68007974961a 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -335,7 +335,7 @@ static void bcm2835_dma_issue_pending(struct dma_chan *chan)
 static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
 	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,
-	unsigned long flags, void *context)
+	unsigned long flags)
 {
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
 	enum dma_slave_buswidth dev_width;
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index bfbce6b07902..6a9d89c93b1f 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -433,7 +433,7 @@ static struct dma_async_tx_descriptor *jz4740_dma_prep_slave_sg(
 static struct dma_async_tx_descriptor *jz4740_dma_prep_dma_cyclic(
 	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,
-	unsigned long flags, void *context)
+	unsigned long flags)
 {
 	struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c);
 	struct jz4740_dma_desc *desc;
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index a13f37f719ed..d566650abf62 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -598,7 +598,7 @@ struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
 static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
 	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,
-	unsigned long tx_flags, void *context)
+	unsigned long tx_flags)
 {
 	struct edma_chan *echan = to_edma_chan(chan);
 	struct device *dev = chan->device->dev;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index cb4bf682a708..7650470196c4 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -1092,7 +1092,6 @@ fail:
  * @period_len: length of a single period
  * @dir: direction of the operation
  * @flags: tx descriptor status flags
- * @context: operation context (ignored)
  *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting in a @period_len sized
@@ -1105,8 +1104,7 @@ fail:
 static struct dma_async_tx_descriptor *
 ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 			   size_t buf_len, size_t period_len,
-			   enum dma_transfer_direction dir, unsigned long flags,
-			   void *context)
+			   enum dma_transfer_direction dir, unsigned long flags)
 {
 	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
 	struct ep93xx_dma_desc *desc, *first;
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 24ab3d371954..3c5711d5fe97 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -517,7 +517,7 @@ err:
 static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
+		unsigned long flags)
 {
 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
 	struct fsl_edma_desc *fsl_desc;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 286660a12cc6..9d2c9e7374dc 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -866,7 +866,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
 static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
 	struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,
-	unsigned long flags, void *context)
+	unsigned long flags)
 {
 	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
 	struct imxdma_engine *imxdma = imxdmac->imxdma;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index de584e605db5..f7626e37d0b8 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1125,7 +1125,7 @@ err_out:
 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
+		unsigned long flags)
 {
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index a7b186d536b3..a1a4db5721b8 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -601,7 +601,7 @@ static struct dma_async_tx_descriptor *
 mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
 			 dma_addr_t buf_addr, size_t len, size_t period_len,
 			 enum dma_transfer_direction direction,
-			 unsigned long flags, void *context)
+			 unsigned long flags)
 {
 	struct mmp_pdma_chan *chan;
 	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index 724f7f4c9720..6ad30e2c5038 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -389,7 +389,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
 static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
+		unsigned long flags)
 {
 	struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
 	struct mmp_tdma_desc *desc;
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index dc1dba78e529..5ea61201dbf0 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -589,7 +589,7 @@ err_out:
 static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
+		unsigned long flags)
 {
 	struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
 	struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index b19f04f4390b..4cf7d9a950d7 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -853,8 +853,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
 static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
 	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-	size_t period_len, enum dma_transfer_direction dir, unsigned long flags,
-	void *context)
+	size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
 {
 	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
 	struct omap_chan *c = to_omap_dma_chan(chan);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index a55d75498098..d5149aacd2fe 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2362,7 +2362,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
 static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
 		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
 		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context)
+		unsigned long flags)
 {
 	struct dma_pl330_desc *desc = NULL, *first = NULL;
 	struct dma_pl330_chan *pch = to_pchan(chan);
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 012520c9fd79..7416572d1e40 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -889,8 +889,7 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
 static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
 	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
-	enum dma_transfer_direction direction, unsigned long flags,
-	void *context)
+	enum dma_transfer_direction direction, unsigned long flags)
 {
 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
 	struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 5ebdfbc1051e..4b0ef043729a 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -612,7 +612,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg(
 static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic(
 	struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
-	enum dma_transfer_direction dir, unsigned long flags, void *context)
+	enum dma_transfer_direction dir, unsigned long flags)
 {
 	struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan);
 	struct sa11x0_dma_desc *txd;
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index e427a03a0e8b..42d497416196 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -668,7 +668,7 @@ static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
 static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
 	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,
-	unsigned long flags, void *context)
+	unsigned long flags)
 {
 	struct shdma_chan *schan = to_shdma_chan(chan);
 	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index 03f7820fa333..aac03ab10c54 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -580,7 +580,7 @@ err_dir:
 static struct dma_async_tx_descriptor *
 sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
 	size_t buf_len, size_t period_len,
-	enum dma_transfer_direction direction, unsigned long flags, void *context)
+	enum dma_transfer_direction direction, unsigned long flags)
 {
 	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
 	struct sirfsoc_dma_desc *sdesc = NULL;
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index c7984459ede7..5fe59335e247 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2531,8 +2531,7 @@ d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 static struct dma_async_tx_descriptor *
 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
 		      size_t buf_len, size_t period_len,
-		      enum dma_transfer_direction direction, unsigned long flags,
-		      void *context)
+		      enum dma_transfer_direction direction, unsigned long flags)
 {
 	unsigned int periods = buf_len / period_len;
 	struct dma_async_tx_descriptor *txd;
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 03ad64ecaaf0..16efa603ff65 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1055,7 +1055,7 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
 static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
 	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
 	size_t period_len, enum dma_transfer_direction direction,
-	unsigned long flags, void *context)
+	unsigned long flags)
 {
 	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
 	struct tegra_dma_desc *dma_desc = NULL;
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 4eb2f82aed1d..94ddccd706fc 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -669,7 +669,7 @@ struct dma_device {
 	struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
 		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
 		size_t period_len, enum dma_transfer_direction direction,
-		unsigned long flags, void *context);
+		unsigned long flags);
 	struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
 		struct dma_chan *chan, struct dma_interleaved_template *xt,
 		unsigned long flags);
@@ -744,7 +744,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
 		unsigned long flags)
 {
 	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
-						    period_len, dir, flags, NULL);
+						    period_len, dir, flags);
 }
 
 static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
-- cgit v1.2.3-58-ga151

From 6fc8ae787c589245ee3395630d2c428a1afab26c Mon Sep 17 00:00:00 2001
From: Mark Brown
Date: Fri, 1 Aug 2014 18:09:48 +0100
Subject: dma: pl08x: Use correct specifier for size_t values

When printing size_t values we should use the %zd or %zx format
specifier in order to ensure the value is displayed correctly and
avoid warnings from sparse.

Signed-off-by: Mark Brown
Signed-off-by: Vinod Koul
---
 drivers/dma/amba-pl08x.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers')

diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 48ec81fe1eac..e34024b000a4 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1040,7 +1040,7 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 
 	if (early_bytes) {
 		dev_vdbg(&pl08x->adev->dev,
-			"%s byte width LLIs (remain 0x%08x)\n",
+			"%s byte width LLIs (remain 0x%08zx)\n",
 			__func__, bd.remainder);
 		prep_byte_width_lli(pl08x, &bd, &cctl, early_bytes,
 			num_llis++, &total_bytes);
@@ -1662,7 +1662,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic(
 	dma_addr_t slave_addr;
 
 	dev_dbg(&pl08x->adev->dev,
-		"%s prepare cyclic transaction of %d/%d bytes %s %s\n",
+		"%s prepare cyclic transaction of %zd/%zd bytes %s %s\n",
 		__func__, period_len, buf_len,
 		direction == DMA_MEM_TO_DEV ? "to" : "from", plchan->name);
 
-- cgit v1.2.3-58-ga151

From 1141b7e95aa1d5c328c719bc405714d90c167a50 Mon Sep 17 00:00:00 2001
From: Fengguang Wu
Date: Tue, 5 Aug 2014 22:00:18 +0530
Subject: dmaengine: nbpf_error_get_channel() can be static

CC: Guennadi Liakhovetski
CC: Vinod Koul
Signed-off-by: Fengguang Wu
Signed-off-by: Vinod Koul
---
 drivers/dma/nbpfaxi.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index cea6a3768e2e..5aeada56a442 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -351,7 +351,7 @@ static u32 nbpf_error_get(struct nbpf_device *nbpf)
 	return nbpf_read(nbpf, NBPF_DSTAT_ER);
 }
 
-struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
+static struct nbpf_channel *nbpf_error_get_channel(struct nbpf_device *nbpf, u32 error)
 {
 	return nbpf->chan + __ffs(error);
 }
-- cgit v1.2.3-58-ga151

From cfc6abc3f1ee01dec27a6166f518f084544e4387 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Wed, 6 Aug 2014 16:35:41 +0200
Subject: dmaengine: nbpfaxi: don't build this driver where it cannot be used

Although this driver doesn't have any explicit compile-time architecture
dependencies, it is better not to bloat kernels on those platforms where
this driver isn't needed, unless a compile test is being performed.

Signed-off-by: Guennadi Liakhovetski
Signed-off-by: Vinod Koul
---
 drivers/dma/Kconfig | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers')

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 901818c1b37c..6c3692da2072 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -386,6 +386,7 @@ config DMA_SUN6I
 config NBPFAXI_DMA
 	tristate "Renesas Type-AXI NBPF DMA support"
 	select DMA_ENGINE
+	depends on ARM || COMPILE_TEST
 	help
 	  Support for "Type-AXI" NBPF DMA IPs from Renesas
 
-- cgit v1.2.3-58-ga151

From 6758ddafad55fc8e665bb03ff103d3e410483fe9 Mon Sep 17 00:00:00 2001
From: Alexandre Belloni
Date: Fri, 1 Aug 2014 18:51:46 +0200
Subject: dma: at_hdmac: fix invalid remaining bytes detection

Found using smatch:
drivers/dma/at_hdmac.c:299 atc_get_bytes_left() warn: unsigned
'atchan->remain_desc' is never less than zero.
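
The warning points at dead code: after an unsigned subtraction a test
such as (sketch; remain_desc is an unsigned type in this driver)

	atchan->remain_desc -= count;
	if (atchan->remain_desc < 0)	/* never true, unsigned values wrap */

can never trigger. The fix below instead compares against 'count'
before subtracting.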
Signed-off-by: Alexandre Belloni
Signed-off-by: Vinod Koul
---
 drivers/dma/at_hdmac.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'drivers')

diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index d20ab1b73a3a..ca9dd2613283 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -294,14 +294,16 @@ static int atc_get_bytes_left(struct dma_chan *chan)
 			ret = -EINVAL;
 			goto out;
 		}
-		atchan->remain_desc -= (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
-			<< (desc_first->tx_width);
-		if (atchan->remain_desc < 0) {
+
+		count = (desc_cur->lli.ctrla & ATC_BTSIZE_MAX)
+			<< desc_first->tx_width;
+		if (atchan->remain_desc < count) {
 			ret = -EINVAL;
 			goto out;
-		} else {
-			ret = atchan->remain_desc;
 		}
+
+		atchan->remain_desc -= count;
+		ret = atchan->remain_desc;
 	} else {
 		/*
 		 * Get residual bytes when current
-- cgit v1.2.3-58-ga151

From a0bbe990c161ddf56444efe80d9d6e15fdc47aca Mon Sep 17 00:00:00 2001
From: Maxime Ripard
Date: Thu, 7 Aug 2014 19:00:44 +0200
Subject: dmaengine: sun6i: depends on RESET_CONTROLLER

Fixes a compilation error when RESET_CONTROLLER is not enabled in the
configuration.

drivers/dma/sun6i-dma.c: In function 'sun6i_dma_probe':
drivers/dma/sun6i-dma.c:911:2: error: implicit declaration of function
'devm_reset_control_get' [-Werror=implicit-function-declaration]

Signed-off-by: Maxime Ripard
Reported-by: Randy Dunlap
Signed-off-by: Vinod Koul
---
 drivers/dma/Kconfig | 1 +
 1 file changed, 1 insertion(+)

(limited to 'drivers')

diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 6c3692da2072..f2fefa327c35 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -378,6 +378,7 @@ config XILINX_VDMA
 config DMA_SUN6I
 	tristate "Allwinner A31 SoCs DMA support"
 	depends on MACH_SUN6I || COMPILE_TEST
+	depends on RESET_CONTROLLER
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
-- cgit v1.2.3-58-ga151
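
For reference, the failing call comes from the sun6i probe path, which
uses the reset-controller API roughly as follows (a sketch; the
surrounding sun6i code is not part of this log):

	struct reset_control *rstc;

	rstc = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

Without CONFIG_RESET_CONTROLLER the declaration of
devm_reset_control_get() is not visible, hence the
implicit-function-declaration error; the added "depends on
RESET_CONTROLLER" simply keeps the driver from being built in such
configurations.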