path: root/kernel/dma/swiotlb.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2023-09-30 11:07:26 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-09-30 11:07:26 -0700
commit     3b517966c5616ac011081153482a5ba0e91b17ff (patch)
tree       907dbd6d690b11ff4112f5185971d9fbd83308e2 /kernel/dma/swiotlb.c
parent     25d48d570eeda62cf71e5b9cdad76a37b833f408 (diff)
parent     2d5780bbef8dbe6375d481cbea212606a80e4453 (diff)
Merge tag 'dma-mapping-6.6-2023-09-30' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:

 - fix the narea calculation in swiotlb initialization (Ross Lagerwall)

 - fix the check whether a device has used swiotlb (Petr Tesarik)

* tag 'dma-mapping-6.6-2023-09-30' of git://git.infradead.org/users/hch/dma-mapping:
  swiotlb: fix the check whether a device has used software IO TLB
  swiotlb: use the calculated number of areas
Diffstat (limited to 'kernel/dma/swiotlb.c')
-rw-r--r--  kernel/dma/swiotlb.c  |  31 ++++++++++++++++++++++++---------
1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 394494a6b1f3..01637677736f 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -399,14 +399,13 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
 	}
 
 	mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
-		default_nareas), SMP_CACHE_BYTES);
+		nareas), SMP_CACHE_BYTES);
 	if (!mem->areas) {
 		pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
 		return;
 	}
 
-	swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false,
-				 default_nareas);
+	swiotlb_init_io_tlb_pool(mem, __pa(tlb), nslabs, false, nareas);
 	add_mem_pool(&io_tlb_default_mem, mem);
 
 	if (flags & SWIOTLB_VERBOSE)
@@ -729,9 +728,6 @@ static void swiotlb_dyn_alloc(struct work_struct *work)
 	}
 
 	add_mem_pool(mem, pool);
-
-	/* Pairs with smp_rmb() in is_swiotlb_buffer(). */
-	smp_wmb();
 }
 
 /**
@@ -1152,9 +1148,26 @@ static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr,
 	spin_unlock_irqrestore(&dev->dma_io_tlb_lock, flags);
 
 found:
-	dev->dma_uses_io_tlb = true;
-	/* Pairs with smp_rmb() in is_swiotlb_buffer() */
-	smp_wmb();
+	WRITE_ONCE(dev->dma_uses_io_tlb, true);
+
+	/*
+	 * The general barrier orders reads and writes against a presumed store
+	 * of the SWIOTLB buffer address by a device driver (to a driver private
+	 * data structure). It serves two purposes.
+	 *
+	 * First, the store to dev->dma_uses_io_tlb must be ordered before the
+	 * presumed store. This guarantees that the returned buffer address
+	 * cannot be passed to another CPU before updating dev->dma_uses_io_tlb.
+	 *
+	 * Second, the load from mem->pools must be ordered before the same
+	 * presumed store. This guarantees that the returned buffer address
+	 * cannot be observed by another CPU before an update of the RCU list
+	 * that was made by swiotlb_dyn_alloc() on a third CPU (cf. multicopy
+	 * atomicity).
+	 *
+	 * See also the comment in is_swiotlb_buffer().
+	 */
+	smp_mb();
 
 	*retpool = pool;
 	return index;
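
For context on the pairing described in the comment above, here is a minimal sketch of the reader side, modeled on the is_swiotlb_buffer() check that the "fix the check whether a device has used software IO TLB" patch adjusts. It assumes the CONFIG_SWIOTLB_DYNAMIC path and is simplified and renamed, not a verbatim copy of include/linux/swiotlb.h. The smp_rmb() below is what the new smp_mb() in swiotlb_find_slots() is intended to pair with.

/*
 * Sketch of the reader-side check that pairs with smp_mb() above.
 * Modeled on is_swiotlb_buffer() with CONFIG_SWIOTLB_DYNAMIC enabled;
 * simplified, not quoted verbatim from the kernel tree.
 */
#include <linux/device.h>
#include <linux/swiotlb.h>

static inline bool is_swiotlb_buffer_sketch(struct device *dev,
					    phys_addr_t paddr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	if (!mem)
		return false;

	/*
	 * The checked address was presumably loaded from a driver-private
	 * structure on this CPU. Order that load before the load of
	 * dev->dma_uses_io_tlb and before the walk of mem->pools in
	 * swiotlb_find_pool(). Pairs with smp_mb() in swiotlb_find_slots().
	 */
	smp_rmb();
	return READ_ONCE(dev->dma_uses_io_tlb) && swiotlb_find_pool(dev, paddr);
}

A plain smp_wmb() on the writer side would only order the two stores; the full smp_mb() is needed because the load from mem->pools must also be ordered before the presumed store of the buffer address, as the comment in the hunk above explains.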