author | Viacheslav Mitrofanov <v.v.mitrofanov@yadro.com> | 2022-07-01 11:29:42 +0300
committer | Vinod Koul <vkoul@kernel.org> | 2022-07-06 18:10:34 +0530
commit | b2cc5c465c2cb8ab697c3fd6583c614e3f6cfbcc (patch)
tree | d1a9c40c70563896477ab3c3631cd9f7876d2e53 /drivers/dma
parent | 4ce653d7c651d6a9fc6d5db4275624ac8d856b99 (diff)
dmaengine: sf-pdma: Add multithread support for a DMA channel
When we get a DMA channel and try to use it from multiple threads, it
causes an oops and hangs the system. This can be reproduced with dmatest:
% echo 64 > /sys/module/dmatest/parameters/threads_per_chan
% echo 10000 > /sys/module/dmatest/parameters/iterations
% echo 1 > /sys/module/dmatest/parameters/run
[   89.480664] Unable to handle kernel NULL pointer dereference at virtual address 00000000000000a0
[   89.488725] Oops [#1]
[   89.494708] CPU: 2 PID: 1008 Comm: dma0chan0-copy0 Not tainted 5.17.0-rc5
[ 89.509385] epc : vchan_find_desc+0x32/0x46
[ 89.513553] ra : sf_pdma_tx_status+0xca/0xd6
This happens because of a data race. Each thread rewrites the channel's
descriptor as soon as device_prep_dma_memcpy() is called, which leads to a
situation where the driver thinks it is using the right descriptor while
that descriptor has actually been freed or replaced by another one.
With this fix, the channel descriptor changes its value only once the
previous one has been used. A new descriptor is acquired from the
vc->desc_issued queue, which is already filled with descriptors that are
ready to be sent. Threads have no direct access to the DMA channel
descriptor any more; they can only queue a descriptor for further
processing.
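To make the resulting flow concrete, here is a hedged sketch of how a
client drives the channel after the fix. queue_one_copy() is a
hypothetical helper, but the calls it wraps are the standard dmaengine
client API, and each step only moves descriptors through the vchan
queues:

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Hypothetical client helper: queue one memcpy on the channel. After the
 * fix none of these steps touches chan->desc directly; the driver picks
 * the head of vc->desc_issued under the vchan lock in issue_pending.
 */
static int queue_one_copy(struct dma_chan *chan, dma_addr_t dst,
			  dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	cookie = dmaengine_submit(tx);		/* tx -> vc->desc_submitted */
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);		/* submitted -> desc_issued,
						 * then the driver starts the
						 * first issued descriptor */
	return 0;
}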
Fixes: 6973886ad58e ("dmaengine: sf-pdma: add platform DMA support for HiFive Unleashed A00")
Signed-off-by: Viacheslav Mitrofanov <v.v.mitrofanov@yadro.com>
Link: https://lore.kernel.org/r/20220701082942.12835-1-v.v.mitrofanov@yadro.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/sf-pdma/sf-pdma.c | 44
1 file changed, 30 insertions(+), 14 deletions(-)
diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c
index db5a4ef76077..4f8b8498c5c6 100644
--- a/drivers/dma/sf-pdma/sf-pdma.c
+++ b/drivers/dma/sf-pdma/sf-pdma.c
@@ -52,16 +52,6 @@ static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
 static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
 {
 	struct sf_pdma_desc *desc;
-	unsigned long flags;
-
-	spin_lock_irqsave(&chan->lock, flags);
-
-	if (chan->desc && !chan->desc->in_use) {
-		spin_unlock_irqrestore(&chan->lock, flags);
-		return chan->desc;
-	}
-
-	spin_unlock_irqrestore(&chan->lock, flags);
 
 	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
 	if (!desc)
@@ -111,7 +101,6 @@ sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
 	desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
 
 	spin_lock_irqsave(&chan->vchan.lock, iflags);
-	chan->desc = desc;
 	sf_pdma_fill_desc(desc, dest, src, len);
 	spin_unlock_irqrestore(&chan->vchan.lock, iflags);
 
@@ -170,11 +159,17 @@ static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
 	unsigned long flags;
 	u64 residue = 0;
 	struct sf_pdma_desc *desc;
-	struct dma_async_tx_descriptor *tx;
+	struct dma_async_tx_descriptor *tx = NULL;
 
 	spin_lock_irqsave(&chan->vchan.lock, flags);
 
-	tx = &chan->desc->vdesc.tx;
+	list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
+		if (vd->tx.cookie == cookie)
+			tx = &vd->tx;
+
+	if (!tx)
+		goto out;
+
 	if (cookie == tx->chan->completed_cookie)
 		goto out;
 
@@ -241,6 +236,19 @@ static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
 	writel(v, regs->ctrl);
 }
 
+static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
+{
+	struct virt_dma_chan *vchan = &chan->vchan;
+	struct virt_dma_desc *vdesc;
+
+	if (list_empty(&vchan->desc_issued))
+		return NULL;
+
+	vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);
+
+	return container_of(vdesc, struct sf_pdma_desc, vdesc);
+}
+
 static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
 {
 	struct sf_pdma_desc *desc = chan->desc;
@@ -268,8 +276,11 @@ static void sf_pdma_issue_pending(struct dma_chan *dchan)
 
 	spin_lock_irqsave(&chan->vchan.lock, flags);
 
-	if (vchan_issue_pending(&chan->vchan) && chan->desc)
+	if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
+		/* vchan_issue_pending has made a check that desc is not NULL */
+		chan->desc = sf_pdma_get_first_pending_desc(chan);
 		sf_pdma_xfer_desc(chan);
+	}
 
 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
 }
@@ -298,6 +309,11 @@ static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
 
 	spin_lock_irqsave(&chan->vchan.lock, flags);
 	list_del(&chan->desc->vdesc.node);
 	vchan_cookie_complete(&chan->desc->vdesc);
+
+	chan->desc = sf_pdma_get_first_pending_desc(chan);
+	if (chan->desc)
+		sf_pdma_xfer_desc(chan);
+
 	spin_unlock_irqrestore(&chan->vchan.lock, flags);
 }