author     Linus Torvalds <torvalds@linux-foundation.org>    2022-08-06 10:56:45 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2022-08-06 10:56:45 -0700
commit     c993e07be023acdeec8e84e2e0743c52adb5fc94 (patch)
tree       873b039ee47b424a31829ffcda3c316c52bf78e4 /arch/arm
parent     1d239c1eb873c7d6c6cbc80d68330c939fd86136 (diff)
parent     5c850d31880e00f063fa2a3746ba212c4bcc510f (diff)
Merge tag 'dma-mapping-5.20-2022-08-06' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping updates from Christoph Hellwig:
- convert arm32 to the common dma-direct code (Arnd Bergmann, Robin
Murphy, Christoph Hellwig)
- restructure the PCIe peer to peer mapping support (Logan Gunthorpe)
- allow the IOMMU code to communicate an optional DMA mapping length
and use that in scsi and libata (John Garry)
- split the global swiotlb lock (Tianyu Lan)
- various fixes and cleanup (Chao Gao, Dan Carpenter, Dongli Zhang,
Lukas Bulwahn, Robin Murphy)
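
To make the first item above concrete: once an architecture selects ARCH_HAS_SYNC_DMA_FOR_DEVICE and ARCH_HAS_SYNC_DMA_FOR_CPU unconditionally (as this merge does for arm32 in the Kconfig hunk below), the generic dma-direct code performs the address translation and bounce buffering and only calls back into the architecture for cache maintenance. The sketch below shows the shape of that interface; the clean_cache_range()/invalidate_cache_range() calls are placeholders standing in for the architecture's real writeback/invalidate primitives, not the routines the arm32 patch actually uses.

    /*
     * Sketch of the hooks an arch exports to the common dma-direct code
     * after selecting ARCH_HAS_SYNC_DMA_FOR_DEVICE / ARCH_HAS_SYNC_DMA_FOR_CPU.
     * clean_cache_range()/invalidate_cache_range() are placeholders for the
     * arch's actual cache maintenance primitives.
     */
    #include <linux/dma-map-ops.h>

    void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
                                  enum dma_data_direction dir)
    {
            if (dir == DMA_FROM_DEVICE)
                    invalidate_cache_range(paddr, size);    /* placeholder */
            else
                    clean_cache_range(paddr, size);         /* placeholder */
    }

    void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
                               enum dma_data_direction dir)
    {
            if (dir != DMA_TO_DEVICE)
                    invalidate_cache_range(paddr, size);    /* placeholder */
    }
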
* tag 'dma-mapping-5.20-2022-08-06' of git://git.infradead.org/users/hch/dma-mapping: (45 commits)
swiotlb: fix passing local variable to debugfs_create_ulong()
dma-mapping: reformat comment to suppress htmldoc warning
PCI/P2PDMA: Remove pci_p2pdma_[un]map_sg()
RDMA/rw: drop pci_p2pdma_[un]map_sg()
RDMA/core: introduce ib_dma_pci_p2p_dma_supported()
nvme-pci: convert to using dma_map_sgtable()
nvme-pci: check DMA ops when indicating support for PCI P2PDMA
iommu/dma: support PCI P2PDMA pages in dma-iommu map_sg
iommu: Explicitly skip bus address marked segments in __iommu_map_sg()
dma-mapping: add flags to dma_map_ops to indicate PCI P2PDMA support
dma-direct: support PCI P2PDMA pages in dma-direct map_sg
dma-mapping: allow EREMOTEIO return code for P2PDMA transfers
PCI/P2PDMA: Introduce helpers for dma_map_sg implementations
PCI/P2PDMA: Attempt to set map_type if it has not been set
lib/scatterlist: add flag for indicating P2PDMA segments in an SGL
swiotlb: clean up some coding style and minor issues
dma-mapping: update comment after dmabounce removal
scsi: sd: Add a comment about limiting max_sectors to shost optimal limit
ata: libata-scsi: cap ata_device->max_sectors according to shost->max_sectors
scsi: scsi_transport_sas: cap shost opt_sectors according to DMA optimal limit
...
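
Several of the entries above ("nvme-pci: convert to using dma_map_sgtable()", "dma-mapping: allow EREMOTEIO return code for P2PDMA transfers") converge on one driver-facing pattern. The fragment below is an illustrative sketch of that pattern with a made-up example_map_request() helper; it is not code taken from any of the listed commits.

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /*
     * Sketch of the dma_map_sgtable() pattern the series moves drivers to:
     * the sg_table tracks the mapped entry count itself, and a P2PDMA
     * transfer the mapping path cannot handle now fails with an error
     * (such as -EREMOTEIO) instead of being silently mis-mapped.
     */
    static int example_map_request(struct device *dev, struct sg_table *sgt,
                                   enum dma_data_direction dir)
    {
            int ret;

            ret = dma_map_sgtable(dev, sgt, dir, 0);
            if (ret)
                    return ret;     /* may be -EREMOTEIO for unsupported P2PDMA */

            /* ... hand sgt->sgl / sgt->nents to the hardware ... */

            dma_unmap_sgtable(dev, sgt, dir, 0);
            return 0;
    }
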
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/Kconfig                                    |    5
-rw-r--r--  arch/arm/common/Kconfig                             |    6
-rw-r--r--  arch/arm/common/Makefile                            |    1
-rw-r--r--  arch/arm/common/dmabounce.c                         |  582
-rw-r--r--  arch/arm/common/sa1111.c                            |   64
-rw-r--r--  arch/arm/include/asm/device.h                       |    3
-rw-r--r--  arch/arm/include/asm/dma-direct.h                   |   49
-rw-r--r--  arch/arm/include/asm/dma-mapping.h                  |  128
-rw-r--r--  arch/arm/include/asm/memory.h                       |    2
-rw-r--r--  arch/arm/mach-footbridge/Kconfig                    |    1
-rw-r--r--  arch/arm/mach-footbridge/common.c                   |   19
-rw-r--r--  arch/arm/mach-footbridge/include/mach/dma-direct.h  |    8
-rw-r--r--  arch/arm/mach-footbridge/include/mach/memory.h      |    4
-rw-r--r--  arch/arm/mach-highbank/highbank.c                   |    2
-rw-r--r--  arch/arm/mach-mvebu/coherency.c                     |    2
-rw-r--r--  arch/arm/mm/dma-mapping.c                           |  652
16 files changed, 99 insertions(+), 1429 deletions(-)
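
One small but representative piece of the diff that follows: footbridge keeps its SDRAM bus offset, so in place of the removed __pfn_to_bus()/__bus_to_pfn() helpers it now selects ARCH_HAS_PHYS_TO_DMA and supplies the dma-direct translation hooks itself. Reflowed from the footbridge hunk of the patch below for readability:

    /* arch/arm/mach-footbridge/common.c (added by this merge, reflowed) */
    dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
    {
            return paddr + (fb_bus_sdram_offset() - PHYS_OFFSET);
    }

    phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
    {
            return dev_addr - (fb_bus_sdram_offset() - PHYS_OFFSET);
    }
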
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 53e6a1da9af5..87badeae3181 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -15,13 +15,12 @@ config ARM select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE select ARCH_HAS_PTE_SPECIAL if ARM_LPAE - select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_SETUP_DMA_OPS select ARCH_HAS_SET_MEMORY select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL select ARCH_HAS_STRICT_MODULE_RWX if MMU - select ARCH_HAS_SYNC_DMA_FOR_DEVICE if SWIOTLB || !MMU - select ARCH_HAS_SYNC_DMA_FOR_CPU if SWIOTLB || !MMU + select ARCH_HAS_SYNC_DMA_FOR_DEVICE + select ARCH_HAS_SYNC_DMA_FOR_CPU select ARCH_HAS_TEARDOWN_DMA_OPS if MMU select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_CUSTOM_GPIO_H diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig index c8e198631d41..d2fdb1796f48 100644 --- a/arch/arm/common/Kconfig +++ b/arch/arm/common/Kconfig @@ -1,11 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config SA1111 bool - select DMABOUNCE if !ARCH_PXA - -config DMABOUNCE - bool - select ZONE_DMA + select ZONE_DMA if ARCH_SA1100 config KRAIT_L2_ACCESSORS bool diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile index 8cd574be94cf..7bae8cbaafe7 100644 --- a/arch/arm/common/Makefile +++ b/arch/arm/common/Makefile @@ -6,7 +6,6 @@ obj-y += firmware.o obj-$(CONFIG_SA1111) += sa1111.o -obj-$(CONFIG_DMABOUNCE) += dmabounce.o obj-$(CONFIG_KRAIT_L2_ACCESSORS) += krait-l2-accessors.o obj-$(CONFIG_SHARP_LOCOMO) += locomo.o obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c deleted file mode 100644 index 7996c04393d5..000000000000 --- a/arch/arm/common/dmabounce.c +++ /dev/null @@ -1,582 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * arch/arm/common/dmabounce.c - * - * Special dma_{map/unmap/dma_sync}_* routines for systems that have - * limited DMA windows. These functions utilize bounce buffers to - * copy data to/from buffers located outside the DMA region. This - * only works for systems in which DMA memory is at the bottom of - * RAM, the remainder of memory is at the top and the DMA memory - * can be marked as ZONE_DMA. Anything beyond that such as discontiguous - * DMA windows will require custom implementations that reserve memory - * areas at early bootup. - * - * Original version by Brad Parker (brad@heeltoe.com) - * Re-written by Christopher Hoover <ch@murgatroid.com> - * Made generic by Deepak Saxena <dsaxena@plexity.net> - * - * Copyright (C) 2002 Hewlett Packard Company. - * Copyright (C) 2004 MontaVista Software, Inc. 
- */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/page-flags.h> -#include <linux/device.h> -#include <linux/dma-direct.h> -#include <linux/dma-map-ops.h> -#include <linux/dmapool.h> -#include <linux/list.h> -#include <linux/scatterlist.h> - -#include <asm/cacheflush.h> -#include <asm/dma-iommu.h> - -#undef STATS - -#ifdef STATS -#define DO_STATS(X) do { X ; } while (0) -#else -#define DO_STATS(X) do { } while (0) -#endif - -/* ************************************************** */ - -struct safe_buffer { - struct list_head node; - - /* original request */ - void *ptr; - size_t size; - int direction; - - /* safe buffer info */ - struct dmabounce_pool *pool; - void *safe; - dma_addr_t safe_dma_addr; -}; - -struct dmabounce_pool { - unsigned long size; - struct dma_pool *pool; -#ifdef STATS - unsigned long allocs; -#endif -}; - -struct dmabounce_device_info { - struct device *dev; - struct list_head safe_buffers; -#ifdef STATS - unsigned long total_allocs; - unsigned long map_op_count; - unsigned long bounce_count; - int attr_res; -#endif - struct dmabounce_pool small; - struct dmabounce_pool large; - - rwlock_t lock; - - int (*needs_bounce)(struct device *, dma_addr_t, size_t); -}; - -#ifdef STATS -static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct dmabounce_device_info *device_info = dev->archdata.dmabounce; - return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n", - device_info->small.allocs, - device_info->large.allocs, - device_info->total_allocs - device_info->small.allocs - - device_info->large.allocs, - device_info->total_allocs, - device_info->map_op_count, - device_info->bounce_count); -} - -static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL); -#endif - - -/* allocate a 'safe' buffer and keep track of it */ -static inline struct safe_buffer * -alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr, - size_t size, enum dma_data_direction dir) -{ - struct safe_buffer *buf; - struct dmabounce_pool *pool; - struct device *dev = device_info->dev; - unsigned long flags; - - dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n", - __func__, ptr, size, dir); - - if (size <= device_info->small.size) { - pool = &device_info->small; - } else if (size <= device_info->large.size) { - pool = &device_info->large; - } else { - pool = NULL; - } - - buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC); - if (buf == NULL) { - dev_warn(dev, "%s: kmalloc failed\n", __func__); - return NULL; - } - - buf->ptr = ptr; - buf->size = size; - buf->direction = dir; - buf->pool = pool; - - if (pool) { - buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC, - &buf->safe_dma_addr); - } else { - buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr, - GFP_ATOMIC); - } - - if (buf->safe == NULL) { - dev_warn(dev, - "%s: could not alloc dma memory (size=%d)\n", - __func__, size); - kfree(buf); - return NULL; - } - -#ifdef STATS - if (pool) - pool->allocs++; - device_info->total_allocs++; -#endif - - write_lock_irqsave(&device_info->lock, flags); - list_add(&buf->node, &device_info->safe_buffers); - write_unlock_irqrestore(&device_info->lock, flags); - - return buf; -} - -/* determine if a buffer is from our "safe" pool */ -static inline struct safe_buffer * -find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr) -{ - struct safe_buffer *b, *rb = NULL; - unsigned long flags; - - read_lock_irqsave(&device_info->lock, flags); - - list_for_each_entry(b, 
&device_info->safe_buffers, node) - if (b->safe_dma_addr <= safe_dma_addr && - b->safe_dma_addr + b->size > safe_dma_addr) { - rb = b; - break; - } - - read_unlock_irqrestore(&device_info->lock, flags); - return rb; -} - -static inline void -free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf) -{ - unsigned long flags; - - dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf); - - write_lock_irqsave(&device_info->lock, flags); - - list_del(&buf->node); - - write_unlock_irqrestore(&device_info->lock, flags); - - if (buf->pool) - dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr); - else - dma_free_coherent(device_info->dev, buf->size, buf->safe, - buf->safe_dma_addr); - - kfree(buf); -} - -/* ************************************************** */ - -static struct safe_buffer *find_safe_buffer_dev(struct device *dev, - dma_addr_t dma_addr, const char *where) -{ - if (!dev || !dev->archdata.dmabounce) - return NULL; - if (dma_mapping_error(dev, dma_addr)) { - dev_err(dev, "Trying to %s invalid mapping\n", where); - return NULL; - } - return find_safe_buffer(dev->archdata.dmabounce, dma_addr); -} - -static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size) -{ - if (!dev || !dev->archdata.dmabounce) - return 0; - - if (dev->dma_mask) { - unsigned long limit, mask = *dev->dma_mask; - - limit = (mask + 1) & ~mask; - if (limit && size > limit) { - dev_err(dev, "DMA mapping too big (requested %#x " - "mask %#Lx)\n", size, *dev->dma_mask); - return -E2BIG; - } - - /* Figure out if we need to bounce from the DMA mask. */ - if ((dma_addr | (dma_addr + size - 1)) & ~mask) - return 1; - } - - return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size); -} - -static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - struct dmabounce_device_info *device_info = dev->archdata.dmabounce; - struct safe_buffer *buf; - - if (device_info) - DO_STATS ( device_info->map_op_count++ ); - - buf = alloc_safe_buffer(device_info, ptr, size, dir); - if (buf == NULL) { - dev_err(dev, "%s: unable to map unsafe buffer %p!\n", - __func__, ptr); - return DMA_MAPPING_ERROR; - } - - dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", - __func__, buf->ptr, virt_to_dma(dev, buf->ptr), - buf->safe, buf->safe_dma_addr); - - if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) && - !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { - dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", - __func__, ptr, buf->safe, size); - memcpy(buf->safe, ptr, size); - } - - return buf->safe_dma_addr; -} - -static inline void unmap_single(struct device *dev, struct safe_buffer *buf, - size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - BUG_ON(buf->size != size); - BUG_ON(buf->direction != dir); - - dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", - __func__, buf->ptr, virt_to_dma(dev, buf->ptr), - buf->safe, buf->safe_dma_addr); - - DO_STATS(dev->archdata.dmabounce->bounce_count++); - - if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) && - !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) { - void *ptr = buf->ptr; - - dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n", - __func__, buf->safe, ptr, size); - memcpy(ptr, buf->safe, size); - - /* - * Since we may have written to a page cache page, - * we need to ensure that the data will be coherent - * with user mappings. 
- */ - __cpuc_flush_dcache_area(ptr, size); - } - free_safe_buffer(dev->archdata.dmabounce, buf); -} - -/* ************************************************** */ - -/* - * see if a buffer address is in an 'unsafe' range. if it is - * allocate a 'safe' buffer and copy the unsafe buffer into it. - * substitute the safe buffer for the unsafe one. - * (basically move the buffer from an unsafe area to a safe one) - */ -static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - dma_addr_t dma_addr; - int ret; - - dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n", - __func__, page, offset, size, dir); - - dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset; - - ret = needs_bounce(dev, dma_addr, size); - if (ret < 0) - return DMA_MAPPING_ERROR; - - if (ret == 0) { - arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir); - return dma_addr; - } - - if (PageHighMem(page)) { - dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n"); - return DMA_MAPPING_ERROR; - } - - return map_single(dev, page_address(page) + offset, size, dir, attrs); -} - -/* - * see if a mapped address was really a "safe" buffer and if so, copy - * the data from the safe buffer back to the unsafe buffer and free up - * the safe buffer. (basically return things back to the way they - * should be) - */ -static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, - enum dma_data_direction dir, unsigned long attrs) -{ - struct safe_buffer *buf; - - dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n", - __func__, dma_addr, size, dir); - - buf = find_safe_buffer_dev(dev, dma_addr, __func__); - if (!buf) { - arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir); - return; - } - - unmap_single(dev, buf, size, dir, attrs); -} - -static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, - size_t sz, enum dma_data_direction dir) -{ - struct safe_buffer *buf; - unsigned long off; - - dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", - __func__, addr, sz, dir); - - buf = find_safe_buffer_dev(dev, addr, __func__); - if (!buf) - return 1; - - off = addr - buf->safe_dma_addr; - - BUG_ON(buf->direction != dir); - - dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", - __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, - buf->safe, buf->safe_dma_addr); - - DO_STATS(dev->archdata.dmabounce->bounce_count++); - - if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { - dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n", - __func__, buf->safe + off, buf->ptr + off, sz); - memcpy(buf->ptr + off, buf->safe + off, sz); - } - return 0; -} - -static void dmabounce_sync_for_cpu(struct device *dev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - if (!__dmabounce_sync_for_cpu(dev, handle, size, dir)) - return; - - arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir); -} - -static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr, - size_t sz, enum dma_data_direction dir) -{ - struct safe_buffer *buf; - unsigned long off; - - dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n", - __func__, addr, sz, dir); - - buf = find_safe_buffer_dev(dev, addr, __func__); - if (!buf) - return 1; - - off = addr - buf->safe_dma_addr; - - BUG_ON(buf->direction != dir); - - dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n", - __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off, - buf->safe, 
buf->safe_dma_addr); - - DO_STATS(dev->archdata.dmabounce->bounce_count++); - - if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) { - dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n", - __func__,buf->ptr + off, buf->safe + off, sz); - memcpy(buf->safe + off, buf->ptr + off, sz); - } - return 0; -} - -static void dmabounce_sync_for_device(struct device *dev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - if (!__dmabounce_sync_for_device(dev, handle, size, dir)) - return; - - arm_dma_ops.sync_single_for_device(dev, handle, size, dir); -} - -static int dmabounce_dma_supported(struct device *dev, u64 dma_mask) -{ - if (dev->archdata.dmabounce) - return 0; - - return arm_dma_ops.dma_supported(dev, dma_mask); -} - -static const struct dma_map_ops dmabounce_ops = { - .alloc = arm_dma_alloc, - .free = arm_dma_free, - .mmap = arm_dma_mmap, - .get_sgtable = arm_dma_get_sgtable, - .map_page = dmabounce_map_page, - .unmap_page = dmabounce_unmap_page, - .sync_single_for_cpu = dmabounce_sync_for_cpu, - .sync_single_for_device = dmabounce_sync_for_device, - .map_sg = arm_dma_map_sg, - .unmap_sg = arm_dma_unmap_sg, - .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, - .sync_sg_for_device = arm_dma_sync_sg_for_device, - .dma_supported = dmabounce_dma_supported, -}; - -static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, - const char *name, unsigned long size) -{ - pool->size = size; - DO_STATS(pool->allocs = 0); - pool->pool = dma_pool_create(name, dev, size, - 0 /* byte alignment */, - 0 /* no page-crossing issues */); - - return pool->pool ? 0 : -ENOMEM; -} - -int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, - unsigned long large_buffer_size, - int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t)) -{ - struct dmabounce_device_info *device_info; - int ret; - - device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC); - if (!device_info) { - dev_err(dev, - "Could not allocated dmabounce_device_info\n"); - return -ENOMEM; - } - - ret = dmabounce_init_pool(&device_info->small, dev, - "small_dmabounce_pool", small_buffer_size); - if (ret) { - dev_err(dev, - "dmabounce: could not allocate DMA pool for %ld byte objects\n", - small_buffer_size); - goto err_free; - } - - if (large_buffer_size) { - ret = dmabounce_init_pool(&device_info->large, dev, - "large_dmabounce_pool", - large_buffer_size); - if (ret) { - dev_err(dev, - "dmabounce: could not allocate DMA pool for %ld byte objects\n", - large_buffer_size); - goto err_destroy; - } - } - - device_info->dev = dev; - INIT_LIST_HEAD(&device_info->safe_buffers); - rwlock_init(&device_info->lock); - device_info->needs_bounce = needs_bounce_fn; - -#ifdef STATS - device_info->total_allocs = 0; - device_info->map_op_count = 0; - device_info->bounce_count = 0; - device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats); -#endif - - dev->archdata.dmabounce = device_info; - set_dma_ops(dev, &dmabounce_ops); - - dev_info(dev, "dmabounce: registered device\n"); - - return 0; - - err_destroy: - dma_pool_destroy(device_info->small.pool); - err_free: - kfree(device_info); - return ret; -} -EXPORT_SYMBOL(dmabounce_register_dev); - -void dmabounce_unregister_dev(struct device *dev) -{ - struct dmabounce_device_info *device_info = dev->archdata.dmabounce; - - dev->archdata.dmabounce = NULL; - set_dma_ops(dev, NULL); - - if (!device_info) { - dev_warn(dev, - "Never registered with dmabounce but attempting" - "to unregister!\n"); - return; - } - - if 
(!list_empty(&device_info->safe_buffers)) { - dev_err(dev, - "Removing from dmabounce with pending buffers!\n"); - BUG(); - } - - if (device_info->small.pool) - dma_pool_destroy(device_info->small.pool); - if (device_info->large.pool) - dma_pool_destroy(device_info->large.pool); - -#ifdef STATS - if (device_info->attr_res == 0) - device_remove_file(dev, &dev_attr_dmabounce_stats); -#endif - - kfree(device_info); - - dev_info(dev, "dmabounce: device unregistered\n"); -} -EXPORT_SYMBOL(dmabounce_unregister_dev); - -MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>"); -MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows"); -MODULE_LICENSE("GPL"); diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c index 2343e2b6214d..f5e6990b8856 100644 --- a/arch/arm/common/sa1111.c +++ b/arch/arm/common/sa1111.c @@ -1389,70 +1389,9 @@ void sa1111_driver_unregister(struct sa1111_driver *driver) } EXPORT_SYMBOL(sa1111_driver_unregister); -#ifdef CONFIG_DMABOUNCE -/* - * According to the "Intel StrongARM SA-1111 Microprocessor Companion - * Chip Specification Update" (June 2000), erratum #7, there is a - * significant bug in the SA1111 SDRAM shared memory controller. If - * an access to a region of memory above 1MB relative to the bank base, - * it is important that address bit 10 _NOT_ be asserted. Depending - * on the configuration of the RAM, bit 10 may correspond to one - * of several different (processor-relative) address bits. - * - * This routine only identifies whether or not a given DMA address - * is susceptible to the bug. - * - * This should only get called for sa1111_device types due to the - * way we configure our device dma_masks. - */ -static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size) -{ - /* - * Section 4.6 of the "Intel StrongARM SA-1111 Development Module - * User's Guide" mentions that jumpers R51 and R52 control the - * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or - * SDRAM bank 1 on Neponset). The default configuration selects - * Assabet, so any address in bank 1 is necessarily invalid. 
- */ - return (machine_is_assabet() || machine_is_pfs168()) && - (addr >= 0xc8000000 || (addr + size) >= 0xc8000000); -} - -static int sa1111_notifier_call(struct notifier_block *n, unsigned long action, - void *data) -{ - struct sa1111_dev *dev = to_sa1111_device(data); - - switch (action) { - case BUS_NOTIFY_ADD_DEVICE: - if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL) { - int ret = dmabounce_register_dev(&dev->dev, 1024, 4096, - sa1111_needs_bounce); - if (ret) - dev_err(&dev->dev, "failed to register with dmabounce: %d\n", ret); - } - break; - - case BUS_NOTIFY_DEL_DEVICE: - if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL) - dmabounce_unregister_dev(&dev->dev); - break; - } - return NOTIFY_OK; -} - -static struct notifier_block sa1111_bus_notifier = { - .notifier_call = sa1111_notifier_call, -}; -#endif - static int __init sa1111_init(void) { int ret = bus_register(&sa1111_bus_type); -#ifdef CONFIG_DMABOUNCE - if (ret == 0) - bus_register_notifier(&sa1111_bus_type, &sa1111_bus_notifier); -#endif if (ret == 0) platform_driver_register(&sa1111_device_driver); return ret; @@ -1461,9 +1400,6 @@ static int __init sa1111_init(void) static void __exit sa1111_exit(void) { platform_driver_unregister(&sa1111_device_driver); -#ifdef CONFIG_DMABOUNCE - bus_unregister_notifier(&sa1111_bus_type, &sa1111_bus_notifier); -#endif bus_unregister(&sa1111_bus_type); } diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h index be666f58bf7a..8754c0f5fc90 100644 --- a/arch/arm/include/asm/device.h +++ b/arch/arm/include/asm/device.h @@ -6,9 +6,6 @@ #define ASMARM_DEVICE_H struct dev_archdata { -#ifdef CONFIG_DMABOUNCE - struct dmabounce_device_info *dmabounce; -#endif #ifdef CONFIG_ARM_DMA_USE_IOMMU struct dma_iommu_mapping *mapping; #endif diff --git a/arch/arm/include/asm/dma-direct.h b/arch/arm/include/asm/dma-direct.h index 77fcb7ee5ec9..4f7bcde03abb 100644 --- a/arch/arm/include/asm/dma-direct.h +++ b/arch/arm/include/asm/dma-direct.h @@ -1,48 +1 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASM_ARM_DMA_DIRECT_H -#define ASM_ARM_DMA_DIRECT_H 1 - -#include <asm/memory.h> - -/* - * dma_to_pfn/pfn_to_dma/virt_to_dma are architecture private - * functions used internally by the DMA-mapping API to provide DMA - * addresses. They must not be used by drivers. 
- */ -static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn) -{ - if (dev && dev->dma_range_map) - pfn = PFN_DOWN(translate_phys_to_dma(dev, PFN_PHYS(pfn))); - return (dma_addr_t)__pfn_to_bus(pfn); -} - -static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr) -{ - unsigned long pfn = __bus_to_pfn(addr); - - if (dev && dev->dma_range_map) - pfn = PFN_DOWN(translate_dma_to_phys(dev, PFN_PHYS(pfn))); - return pfn; -} - -static inline dma_addr_t virt_to_dma(struct device *dev, void *addr) -{ - if (dev) - return pfn_to_dma(dev, virt_to_pfn(addr)); - - return (dma_addr_t)__virt_to_bus((unsigned long)(addr)); -} - -static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) -{ - unsigned int offset = paddr & ~PAGE_MASK; - return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset; -} - -static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) -{ - unsigned int offset = dev_addr & ~PAGE_MASK; - return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset; -} - -#endif /* ASM_ARM_DMA_DIRECT_H */ +#include <mach/dma-direct.h> diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h deleted file mode 100644 index 77082246a5e1..000000000000 --- a/arch/arm/include/asm/dma-mapping.h +++ /dev/null @@ -1,128 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef ASMARM_DMA_MAPPING_H -#define ASMARM_DMA_MAPPING_H - -#ifdef __KERNEL__ - -#include <linux/mm_types.h> -#include <linux/scatterlist.h> - -#include <xen/xen.h> -#include <asm/xen/hypervisor.h> - -extern const struct dma_map_ops arm_dma_ops; -extern const struct dma_map_ops arm_coherent_dma_ops; - -static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus) -{ - if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE)) - return &arm_dma_ops; - return NULL; -} - -/** - * arm_dma_alloc - allocate consistent memory for DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @size: required memory size - * @handle: bus-specific DMA address - * @attrs: optinal attributes that specific mapping properties - * - * Allocate some memory for a device for performing DMA. This function - * allocates pages, and will return the CPU-viewed address, and sets @handle - * to be the device-viewed address. - */ -extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, - gfp_t gfp, unsigned long attrs); - -/** - * arm_dma_free - free memory allocated by arm_dma_alloc - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @size: size of memory originally requested in dma_alloc_coherent - * @cpu_addr: CPU-view address returned from dma_alloc_coherent - * @handle: device-view address returned from dma_alloc_coherent - * @attrs: optinal attributes that specific mapping properties - * - * Free (and unmap) a DMA buffer previously allocated by - * arm_dma_alloc(). - * - * References to memory and mappings associated with cpu_addr/handle - * during and after this call executing are illegal. 
- */ -extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, unsigned long attrs); - -/** - * arm_dma_mmap - map a coherent DMA allocation into user space - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @vma: vm_area_struct describing requested user mapping - * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent - * @handle: device-view address returned from dma_alloc_coherent - * @size: size of memory originally requested in dma_alloc_coherent - * @attrs: optinal attributes that specific mapping properties - * - * Map a coherent DMA buffer previously allocated by dma_alloc_coherent - * into user space. The coherent DMA buffer must not be freed by the - * driver until the user space mapping has been released. - */ -extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs); - -/* - * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" - * and utilize bounce buffers as needed to work around limited DMA windows. - * - * On the SA-1111, a bug limits DMA to only certain regions of RAM. - * On the IXP425, the PCI inbound window is 64MB (256MB total RAM) - * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM) - * - * The following are helper functions used by the dmabounce subystem - * - */ - -/** - * dmabounce_register_dev - * - * @dev: valid struct device pointer - * @small_buf_size: size of buffers to use with small buffer pool - * @large_buf_size: size of buffers to use with large buffer pool (can be 0) - * @needs_bounce_fn: called to determine whether buffer needs bouncing - * - * This function should be called by low-level platform code to register - * a device as requireing DMA buffer bouncing. The function will allocate - * appropriate DMA pools for the device. - */ -extern int dmabounce_register_dev(struct device *, unsigned long, - unsigned long, int (*)(struct device *, dma_addr_t, size_t)); - -/** - * dmabounce_unregister_dev - * - * @dev: valid struct device pointer - * - * This function should be called by low-level platform code when device - * that was previously registered with dmabounce_register_dev is removed - * from the system. - * - */ -extern void dmabounce_unregister_dev(struct device *); - - - -/* - * The scatter list versions of the above methods. 
- */ -extern int arm_dma_map_sg(struct device *, struct scatterlist *, int, - enum dma_data_direction, unsigned long attrs); -extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int, - enum dma_data_direction, unsigned long attrs); -extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, - enum dma_data_direction); -extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int, - enum dma_data_direction); -extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs); - -#endif /* __KERNEL__ */ -#endif diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index f673e13e0f94..a55a9038abc8 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -378,8 +378,6 @@ static inline unsigned long __virt_to_idmap(unsigned long x) #ifndef __virt_to_bus #define __virt_to_bus __virt_to_phys #define __bus_to_virt __phys_to_virt -#define __pfn_to_bus(x) __pfn_to_phys(x) -#define __bus_to_pfn(x) __phys_to_pfn(x) #endif /* diff --git a/arch/arm/mach-footbridge/Kconfig b/arch/arm/mach-footbridge/Kconfig index bcd4e4ca34f7..acc10b1caa69 100644 --- a/arch/arm/mach-footbridge/Kconfig +++ b/arch/arm/mach-footbridge/Kconfig @@ -61,6 +61,7 @@ endmenu # Footbridge support config FOOTBRIDGE + select ARCH_HAS_PHYS_TO_DMA bool # Footbridge in host mode diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c index 322495df271d..5020eb96b025 100644 --- a/arch/arm/mach-footbridge/common.c +++ b/arch/arm/mach-footbridge/common.c @@ -12,6 +12,7 @@ #include <linux/init.h> #include <linux/io.h> #include <linux/spinlock.h> +#include <linux/dma-direct.h> #include <video/vga.h> #include <asm/page.h> @@ -335,17 +336,19 @@ unsigned long __bus_to_virt(unsigned long res) return res; } EXPORT_SYMBOL(__bus_to_virt); - -unsigned long __pfn_to_bus(unsigned long pfn) +#else +static inline unsigned long fb_bus_sdram_offset(void) { - return __pfn_to_phys(pfn) + (fb_bus_sdram_offset() - PHYS_OFFSET); + return BUS_OFFSET; } -EXPORT_SYMBOL(__pfn_to_bus); +#endif /* CONFIG_FOOTBRIDGE_ADDIN */ -unsigned long __bus_to_pfn(unsigned long bus) +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { - return __phys_to_pfn(bus - (fb_bus_sdram_offset() - PHYS_OFFSET)); + return paddr + (fb_bus_sdram_offset() - PHYS_OFFSET); } -EXPORT_SYMBOL(__bus_to_pfn); -#endif +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) +{ + return dev_addr - (fb_bus_sdram_offset() - PHYS_OFFSET); +} diff --git a/arch/arm/mach-footbridge/include/mach/dma-direct.h b/arch/arm/mach-footbridge/include/mach/dma-direct.h new file mode 100644 index 000000000000..01f9e8367c00 --- /dev/null +++ b/arch/arm/mach-footbridge/include/mach/dma-direct.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef MACH_FOOTBRIDGE_DMA_DIRECT_H +#define MACH_FOOTBRIDGE_DMA_DIRECT_H 1 + +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr); + +#endif /* MACH_FOOTBRIDGE_DMA_DIRECT_H */ diff --git a/arch/arm/mach-footbridge/include/mach/memory.h b/arch/arm/mach-footbridge/include/mach/memory.h index 46fd4a8872b9..3a5d2638c18f 100644 --- a/arch/arm/mach-footbridge/include/mach/memory.h +++ b/arch/arm/mach-footbridge/include/mach/memory.h @@ -26,8 +26,6 @@ #ifndef __ASSEMBLY__ extern unsigned long __virt_to_bus(unsigned long); extern unsigned long __bus_to_virt(unsigned long); 
-extern unsigned long __pfn_to_bus(unsigned long); -extern unsigned long __bus_to_pfn(unsigned long); #endif #define __virt_to_bus __virt_to_bus #define __bus_to_virt __bus_to_virt @@ -42,8 +40,6 @@ extern unsigned long __bus_to_pfn(unsigned long); #define BUS_OFFSET 0xe0000000 #define __virt_to_bus(x) ((x) + (BUS_OFFSET - PAGE_OFFSET)) #define __bus_to_virt(x) ((x) - (BUS_OFFSET - PAGE_OFFSET)) -#define __pfn_to_bus(x) (__pfn_to_phys(x) + (BUS_OFFSET - PHYS_OFFSET)) -#define __bus_to_pfn(x) __phys_to_pfn((x) - (BUS_OFFSET - PHYS_OFFSET)) #else diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index db607955a7e4..5d4f977ac7d2 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -98,7 +98,7 @@ static int highbank_platform_notifier(struct notifier_block *nb, if (of_property_read_bool(dev->of_node, "dma-coherent")) { val = readl(sregs_base + reg); writel(val | 0xff01, sregs_base + reg); - set_dma_ops(dev, &arm_coherent_dma_ops); + dev->dma_coherent = true; } return NOTIFY_OK; diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c index 883dab1b54f3..a6b621ff0b87 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c @@ -95,7 +95,7 @@ static int mvebu_hwcc_notifier(struct notifier_block *nb, if (event != BUS_NOTIFY_ADD_DEVICE) return NOTIFY_DONE; - set_dma_ops(dev, &arm_coherent_dma_ops); + dev->dma_coherent = true; return NOTIFY_OK; } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 1483b6a4319d..089c9c644cce 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -103,139 +103,6 @@ static struct arm_dma_buffer *arm_dma_buffer_find(void *virt) * before transfers and delay cache invalidation until transfer completion. * */ -static void __dma_page_cpu_to_dev(struct page *, unsigned long, - size_t, enum dma_data_direction); -static void __dma_page_dev_to_cpu(struct page *, unsigned long, - size_t, enum dma_data_direction); - -/** - * arm_dma_map_page - map a portion of a page for streaming DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @page: page that buffer resides in - * @offset: offset into page for start of buffer - * @size: size of buffer to map - * @dir: DMA transfer direction - * - * Ensure that any data held in the cache is appropriately discarded - * or written back. - * - * The device owns this memory once this call has completed. The CPU - * can regain ownership by calling dma_unmap_page(). - */ -static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) - __dma_page_cpu_to_dev(page, offset, size, dir); - return pfn_to_dma(dev, page_to_pfn(page)) + offset; -} - -static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, enum dma_data_direction dir, - unsigned long attrs) -{ - return pfn_to_dma(dev, page_to_pfn(page)) + offset; -} - -/** - * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page() - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @handle: DMA address of buffer - * @size: size of buffer (same as passed to dma_map_page) - * @dir: DMA transfer direction (same as passed to dma_map_page) - * - * Unmap a page streaming mode DMA translation. 
The handle and size - * must match what was provided in the previous dma_map_page() call. - * All other usages are undefined. - * - * After this call, reads by the CPU to the buffer are guaranteed to see - * whatever the device wrote there. - */ -static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle, - size_t size, enum dma_data_direction dir, unsigned long attrs) -{ - if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) - __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), - handle & ~PAGE_MASK, size, dir); -} - -static void arm_dma_sync_single_for_cpu(struct device *dev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - unsigned int offset = handle & (PAGE_SIZE - 1); - struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); - __dma_page_dev_to_cpu(page, offset, size, dir); -} - -static void arm_dma_sync_single_for_device(struct device *dev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - unsigned int offset = handle & (PAGE_SIZE - 1); - struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset)); - __dma_page_cpu_to_dev(page, offset, size, dir); -} - -/* - * Return whether the given device DMA address mask can be supported - * properly. For example, if your device can only drive the low 24-bits - * during bus mastering, then you would pass 0x00ffffff as the mask - * to this function. - */ -static int arm_dma_supported(struct device *dev, u64 mask) -{ - unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit); - - /* - * Translate the device's DMA mask to a PFN limit. This - * PFN number includes the page which we can DMA to. - */ - return dma_to_pfn(dev, mask) >= max_dma_pfn; -} - -const struct dma_map_ops arm_dma_ops = { - .alloc = arm_dma_alloc, - .free = arm_dma_free, - .alloc_pages = dma_direct_alloc_pages, - .free_pages = dma_direct_free_pages, - .mmap = arm_dma_mmap, - .get_sgtable = arm_dma_get_sgtable, - .map_page = arm_dma_map_page, - .unmap_page = arm_dma_unmap_page, - .map_sg = arm_dma_map_sg, - .unmap_sg = arm_dma_unmap_sg, - .map_resource = dma_direct_map_resource, - .sync_single_for_cpu = arm_dma_sync_single_for_cpu, - .sync_single_for_device = arm_dma_sync_single_for_device, - .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, - .sync_sg_for_device = arm_dma_sync_sg_for_device, - .dma_supported = arm_dma_supported, - .get_required_mask = dma_direct_get_required_mask, -}; -EXPORT_SYMBOL(arm_dma_ops); - -static void *arm_coherent_dma_alloc(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, unsigned long attrs); -static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, unsigned long attrs); -static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs); - -const struct dma_map_ops arm_coherent_dma_ops = { - .alloc = arm_coherent_dma_alloc, - .free = arm_coherent_dma_free, - .alloc_pages = dma_direct_alloc_pages, - .free_pages = dma_direct_free_pages, - .mmap = arm_coherent_dma_mmap, - .get_sgtable = arm_dma_get_sgtable, - .map_page = arm_coherent_dma_map_page, - .map_sg = arm_dma_map_sg, - .map_resource = dma_direct_map_resource, - .dma_supported = arm_dma_supported, - .get_required_mask = dma_direct_get_required_mask, -}; -EXPORT_SYMBOL(arm_coherent_dma_ops); static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag) { @@ -725,7 +592,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, if (page) { unsigned 
long flags; - *handle = pfn_to_dma(dev, page_to_pfn(page)); + *handle = phys_to_dma(dev, page_to_phys(page)); buf->virt = args.want_vaddr ? addr : page; spin_lock_irqsave(&arm_dma_bufs_lock, flags); @@ -739,74 +606,13 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, } /* - * Allocate DMA-coherent memory space and return both the kernel remapped - * virtual and bus address for that space. - */ -void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, - gfp_t gfp, unsigned long attrs) -{ - pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); - - return __dma_alloc(dev, size, handle, gfp, prot, false, - attrs, __builtin_return_address(0)); -} - -static void *arm_coherent_dma_alloc(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, unsigned long attrs) -{ - return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true, - attrs, __builtin_return_address(0)); -} - -static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - int ret = -ENXIO; - unsigned long nr_vma_pages = vma_pages(vma); - unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; - unsigned long pfn = dma_to_pfn(dev, dma_addr); - unsigned long off = vma->vm_pgoff; - - if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) - return ret; - - if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) { - ret = remap_pfn_range(vma, vma->vm_start, - pfn + off, - vma->vm_end - vma->vm_start, - vma->vm_page_prot); - } - - return ret; -} - -/* - * Create userspace mapping for the DMA-coherent memory. - */ -static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); -} - -int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size, - unsigned long attrs) -{ - vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); - return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs); -} - -/* * Free a buffer as defined by the above mapping. 
*/ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle, unsigned long attrs, bool is_coherent) { - struct page *page = pfn_to_page(dma_to_pfn(dev, handle)); + struct page *page = phys_to_page(dma_to_phys(dev, handle)); struct arm_dma_buffer *buf; struct arm_dma_free_args args = { .dev = dev, @@ -824,40 +630,6 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr, kfree(buf); } -void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, unsigned long attrs) -{ - __arm_dma_free(dev, size, cpu_addr, handle, attrs, false); -} - -static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, unsigned long attrs) -{ - __arm_dma_free(dev, size, cpu_addr, handle, attrs, true); -} - -int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t handle, size_t size, - unsigned long attrs) -{ - unsigned long pfn = dma_to_pfn(dev, handle); - struct page *page; - int ret; - - /* If the PFN is not valid, we do not have a struct page */ - if (!pfn_valid(pfn)) - return -ENXIO; - - page = pfn_to_page(pfn); - - ret = sg_alloc_table(sgt, 1, GFP_KERNEL); - if (unlikely(ret)) - return ret; - - sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); - return 0; -} - static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, void (*op)(const void *, size_t, int)) @@ -907,8 +679,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, /* * Make an area consistent for devices. - * Note: Drivers should NOT use this function directly, as it will break - * platforms with CONFIG_DMABOUNCE. + * Note: Drivers should NOT use this function directly. * Use the driver DMA support - see dma-mapping.h (dma_sync_*) */ static void __dma_page_cpu_to_dev(struct page *page, unsigned long off, @@ -961,122 +732,6 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, } } -/** - * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @sg: list of buffers - * @nents: number of buffers to map - * @dir: DMA transfer direction - * - * Map a set of buffers described by scatterlist in streaming mode for DMA. - * This is the scatter-gather version of the dma_map_single interface. - * Here the scatter gather list elements are each tagged with the - * appropriate dma address and length. They are obtained via - * sg_dma_{address,length}. - * - * Device ownership issues as mentioned for dma_map_single are the same - * here. 
- */ -int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - struct scatterlist *s; - int i, j, ret; - - for_each_sg(sg, s, nents, i) { -#ifdef CONFIG_NEED_SG_DMA_LENGTH - s->dma_length = s->length; -#endif - s->dma_address = ops->map_page(dev, sg_page(s), s->offset, - s->length, dir, attrs); - if (dma_mapping_error(dev, s->dma_address)) { - ret = -EIO; - goto bad_mapping; - } - } - return nents; - - bad_mapping: - for_each_sg(sg, s, i, j) - ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); - return ret; -} - -/** - * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @sg: list of buffers - * @nents: number of buffers to unmap (same as was passed to dma_map_sg) - * @dir: DMA transfer direction (same as was passed to dma_map_sg) - * - * Unmap a set of streaming mode DMA translations. Again, CPU access - * rules concerning calls here are the same as for dma_unmap_single(). - */ -void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, - enum dma_data_direction dir, unsigned long attrs) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - struct scatterlist *s; - - int i; - - for_each_sg(sg, s, nents, i) - ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs); -} - -/** - * arm_dma_sync_sg_for_cpu - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @sg: list of buffers - * @nents: number of buffers to map (returned from dma_map_sg) - * @dir: DMA transfer direction (same as was passed to dma_map_sg) - */ -void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - struct scatterlist *s; - int i; - - for_each_sg(sg, s, nents, i) - ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length, - dir); -} - -/** - * arm_dma_sync_sg_for_device - * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices - * @sg: list of buffers - * @nents: number of buffers to map (returned from dma_map_sg) - * @dir: DMA transfer direction (same as was passed to dma_map_sg) - */ -void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, - int nents, enum dma_data_direction dir) -{ - const struct dma_map_ops *ops = get_dma_ops(dev); - struct scatterlist *s; - int i; - - for_each_sg(sg, s, nents, i) - ops->sync_single_for_device(dev, sg_dma_address(s), s->length, - dir); -} - -static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) -{ - /* - * When CONFIG_ARM_LPAE is set, physical address can extend above - * 32-bits, which then can't be addressed by devices that only support - * 32-bit DMA. - * Use the generic dma-direct / swiotlb ops code in that case, as that - * handles bounce buffering for us. - */ - if (IS_ENABLED(CONFIG_ARM_LPAE)) - return NULL; - return coherent ? 
&arm_coherent_dma_ops : &arm_dma_ops; -} - #ifdef CONFIG_ARM_DMA_USE_IOMMU static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs) @@ -1423,13 +1078,13 @@ static void __iommu_free_atomic(struct device *dev, void *cpu_addr, __free_from_pool(cpu_addr, size); } -static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, unsigned long attrs, - int coherent_flag) +static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, + dma_addr_t *handle, gfp_t gfp, unsigned long attrs) { pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL); struct page **pages; void *addr = NULL; + int coherent_flag = dev->dma_coherent ? COHERENT : NORMAL; *handle = DMA_MAPPING_ERROR; size = PAGE_ALIGN(size); @@ -1472,19 +1127,7 @@ err_buffer: return NULL; } -static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, unsigned long attrs) -{ - return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL); -} - -static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *handle, gfp_t gfp, unsigned long attrs) -{ - return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT); -} - -static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, +static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { @@ -1498,35 +1141,24 @@ static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma if (vma->vm_pgoff >= nr_pages) return -ENXIO; + if (!dev->dma_coherent) + vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); + err = vm_map_pages(vma, pages, nr_pages); if (err) pr_err("Remapping memory failed: %d\n", err); return err; } -static int arm_iommu_mmap_attrs(struct device *dev, - struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size, unsigned long attrs) -{ - vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot); - - return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); -} - -static int arm_coherent_iommu_mmap_attrs(struct device *dev, - struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size, unsigned long attrs) -{ - return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs); -} /* * free a page as defined by the above mapping. * Must not be called with IRQs disabled. */ -static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, - dma_addr_t handle, unsigned long attrs, int coherent_flag) +static void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle, unsigned long attrs) { + int coherent_flag = dev->dma_coherent ? 
COHERENT : NORMAL;
 	struct page **pages;
 	size = PAGE_ALIGN(size);
@@ -1548,19 +1180,6 @@ static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_ad
 	__iommu_free_buffer(dev, pages, size, attrs);
 }

-static void arm_iommu_free_attrs(struct device *dev, size_t size,
-		void *cpu_addr, dma_addr_t handle,
-		unsigned long attrs)
-{
-	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
-}
-
-static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
-		void *cpu_addr, dma_addr_t handle, unsigned long attrs)
-{
-	__arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
-}
-
 static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 				 void *cpu_addr, dma_addr_t dma_addr,
 				 size_t size, unsigned long attrs)
@@ -1580,8 +1199,7 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
  */
 static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 			  size_t size, dma_addr_t *handle,
-			  enum dma_data_direction dir, unsigned long attrs,
-			  bool is_coherent)
+			  enum dma_data_direction dir, unsigned long attrs)
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova, iova_base;
@@ -1601,7 +1219,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
 		phys_addr_t phys = page_to_phys(sg_page(s));
 		unsigned int len = PAGE_ALIGN(s->offset + s->length);

-		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

 		prot = __dma_info_to_prot(dir, attrs);
@@ -1621,9 +1239,20 @@ fail:
 	return ret;
 }

-static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
-		enum dma_data_direction dir, unsigned long attrs,
-		bool is_coherent)
+/**
+ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+ * @dir: DMA transfer direction
+ *
+ * Map a set of buffers described by scatterlist in streaming mode for DMA.
+ * The scatter gather list elements are merged together (if possible) and
+ * tagged with the appropriate dma address and length. They are obtained via
+ * sg_dma_{address,length}.
+ */
+static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
+		int nents, enum dma_data_direction dir, unsigned long attrs)
 {
 	struct scatterlist *s = sg, *dma = sg, *start = sg;
 	int i, count = 0, ret;
@@ -1638,8 +1267,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		if (s->offset || (size & ~PAGE_MASK) ||
 		    size + s->length > max) {
 			ret = __map_sg_chunk(dev, start, size,
-					&dma->dma_address, dir, attrs,
-					is_coherent);
+					&dma->dma_address, dir, attrs);
 			if (ret < 0)
 				goto bad_mapping;

@@ -1653,8 +1281,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 		}
 		size += s->length;
 	}
-	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
-		is_coherent);
+	ret = __map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs);
 	if (ret < 0)
 		goto bad_mapping;

@@ -1672,44 +1299,19 @@ bad_mapping:
 }

 /**
- * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
- *
- * Map a set of i/o coherent buffers described by scatterlist in streaming
- * mode for DMA. The scatter gather list elements are merged together (if
- * possible) and tagged with the appropriate dma address and length. They are
- * obtained via sg_dma_{address,length}.
- */
-static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
-}
-
-/**
- * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
+ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer
  * @sg: list of buffers
- * @nents: number of buffers to map
- * @dir: DMA transfer direction
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  *
- * Map a set of buffers described by scatterlist in streaming mode for DMA.
- * The scatter gather list elements are merged together (if possible) and
- * tagged with the appropriate dma address and length. They are obtained via
- * sg_dma_{address,length}.
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
  */
-static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
-}
-
-static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nents, enum dma_data_direction dir,
-		unsigned long attrs, bool is_coherent)
+static void arm_iommu_unmap_sg(struct device *dev,
+			struct scatterlist *sg, int nents,
+			enum dma_data_direction dir,
+			unsigned long attrs)
 {
 	struct scatterlist *s;
 	int i;
@@ -1718,48 +1320,13 @@ static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
 		if (sg_dma_len(s))
 			__iommu_remove_mapping(dev, sg_dma_address(s),
 					       sg_dma_len(s));
-		if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+		if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 			__dma_page_dev_to_cpu(sg_page(s), s->offset,
 					      s->length, dir);
 	}
 }

 /**
- * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- *
- * Unmap a set of streaming mode DMA translations. Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
- */
-static void arm_coherent_iommu_unmap_sg(struct device *dev,
-		struct scatterlist *sg, int nents, enum dma_data_direction dir,
-		unsigned long attrs)
-{
-	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
-}
-
-/**
- * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
- * @dev: valid struct device pointer
- * @sg: list of buffers
- * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
- * @dir: DMA transfer direction (same as was passed to dma_map_sg)
- *
- * Unmap a set of streaming mode DMA translations. Again, CPU access
- * rules concerning calls here are the same as for dma_unmap_single().
- */
-static void arm_iommu_unmap_sg(struct device *dev,
-			struct scatterlist *sg, int nents,
-			enum dma_data_direction dir,
-			unsigned long attrs)
-{
-	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
-}
-
-/**
  * arm_iommu_sync_sg_for_cpu
  * @dev: valid struct device pointer
  * @sg: list of buffers
@@ -1773,6 +1340,9 @@ static void arm_iommu_sync_sg_for_cpu(struct device *dev,
 	struct scatterlist *s;
 	int i;

+	if (dev->dma_coherent)
+		return;
+
 	for_each_sg(sg, s, nents, i)
 		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);

@@ -1792,22 +1362,24 @@ static void arm_iommu_sync_sg_for_device(struct device *dev,
 	struct scatterlist *s;
 	int i;

+	if (dev->dma_coherent)
+		return;
+
 	for_each_sg(sg, s, nents, i)
 		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
 }

-
 /**
- * arm_coherent_iommu_map_page
+ * arm_iommu_map_page
  * @dev: valid struct device pointer
  * @page: page that buffer resides in
  * @offset: offset into page for start of buffer
  * @size: size of buffer to map
  * @dir: DMA transfer direction
  *
- * Coherent IOMMU aware version of arm_dma_map_page()
+ * IOMMU aware version of arm_dma_map_page()
  */
-static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
+static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     unsigned long attrs)
 {
@@ -1815,6 +1387,9 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 	dma_addr_t dma_addr;
 	int ret, prot, len = PAGE_ALIGN(size + offset);

+	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_MAPPING_ERROR)
 		return dma_addr;
@@ -1832,50 +1407,6 @@ fail:
 }

 /**
- * arm_iommu_map_page
- * @dev: valid struct device pointer
- * @page: page that buffer resides in
- * @offset: offset into page for start of buffer
- * @size: size of buffer to map
- * @dir: DMA transfer direction
- *
- * IOMMU aware version of arm_dma_map_page()
- */
-static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir,
-	     unsigned long attrs)
-{
-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
-		__dma_page_cpu_to_dev(page, offset, size, dir);
-
-	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
-}
-
-/**
- * arm_coherent_iommu_unmap_page
- * @dev: valid struct device pointer
- * @handle: DMA address of buffer
- * @size: size of buffer (same as passed to dma_map_page)
- * @dir: DMA transfer direction (same as passed to dma_map_page)
- *
- * Coherent IOMMU aware version of arm_dma_unmap_page()
- */
-static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
-	dma_addr_t iova = handle & PAGE_MASK;
-	int offset = handle & ~PAGE_MASK;
-	int len = PAGE_ALIGN(size + offset);
-
-	if (!iova)
-		return;
-
-	iommu_unmap(mapping->domain, iova, len);
-	__free_iova(mapping, iova, len);
-}
-
-/**
  * arm_iommu_unmap_page
  * @dev: valid struct device pointer
  * @handle: DMA address of buffer
@@ -1889,15 +1420,17 @@ static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	struct page *page;
 	int offset = handle & ~PAGE_MASK;
 	int len = PAGE_ALIGN(size + offset);

 	if (!iova)
 		return;

-	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
+	if (!dev->dma_coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+		page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 		__dma_page_dev_to_cpu(page, offset, size, dir);
+	}

 	iommu_unmap(mapping->domain, iova, len);
 	__free_iova(mapping, iova, len);
@@ -1965,12 +1498,13 @@ static void arm_iommu_sync_single_for_cpu(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;

-	if (!iova)
+	if (dev->dma_coherent || !iova)
 		return;

+	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	__dma_page_dev_to_cpu(page, offset, size, dir);
 }

@@ -1979,12 +1513,13 @@ static void arm_iommu_sync_single_for_device(struct device *dev,
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t iova = handle & PAGE_MASK;
-	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
+	struct page *page;
 	unsigned int offset = handle & ~PAGE_MASK;

-	if (!iova)
+	if (dev->dma_coherent || !iova)
 		return;

+	page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
 	__dma_page_cpu_to_dev(page, offset, size, dir);
 }

@@ -2006,26 +1541,6 @@ static const struct dma_map_ops iommu_ops = {

 	.map_resource		= arm_iommu_map_resource,
 	.unmap_resource		= arm_iommu_unmap_resource,
-
-	.dma_supported		= arm_dma_supported,
-};
-
-static const struct dma_map_ops iommu_coherent_ops = {
-	.alloc		= arm_coherent_iommu_alloc_attrs,
-	.free		= arm_coherent_iommu_free_attrs,
-	.mmap		= arm_coherent_iommu_mmap_attrs,
-	.get_sgtable	= arm_iommu_get_sgtable,
-
-	.map_page	= arm_coherent_iommu_map_page,
-	.unmap_page	= arm_coherent_iommu_unmap_page,
-
-	.map_sg		= arm_coherent_iommu_map_sg,
-	.unmap_sg	= arm_coherent_iommu_unmap_sg,
-
-	.map_resource	= arm_iommu_map_resource,
-	.unmap_resource	= arm_iommu_unmap_resource,
-
-	.dma_supported	= arm_dma_supported,
 };

 /**
@@ -2201,40 +1716,32 @@ void arm_iommu_detach_device(struct device *dev)
 	iommu_detach_device(mapping->domain, dev);
 	kref_put(&mapping->kref, release_iommu_mapping);
 	to_dma_iommu_mapping(dev) = NULL;
-	set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));
+	set_dma_ops(dev, NULL);

 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
 }
 EXPORT_SYMBOL_GPL(arm_iommu_detach_device);

-static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
-{
-	return coherent ? &iommu_coherent_ops : &iommu_ops;
-}
-
-static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    const struct iommu_ops *iommu)
+static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
+				    const struct iommu_ops *iommu, bool coherent)
 {
 	struct dma_iommu_mapping *mapping;

-	if (!iommu)
-		return false;
-
 	mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
 	if (IS_ERR(mapping)) {
 		pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
				size, dev_name(dev));
-		return false;
+		return;
 	}

 	if (__arm_iommu_attach_device(dev, mapping)) {
 		pr_warn("Failed to attached device %s to IOMMU_mapping\n",
				dev_name(dev));
 		arm_iommu_release_mapping(mapping);
-		return false;
+		return;
 	}

-	return true;
+	set_dma_ops(dev, &iommu_ops);
 }

 static void arm_teardown_iommu_dma_ops(struct device *dev)
@@ -2250,27 +1757,20 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)

 #else

-static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
-				    const struct iommu_ops *iommu)
+static void arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
+				    const struct iommu_ops *iommu, bool coherent)
 {
-	return false;
 }

 static void arm_teardown_iommu_dma_ops(struct device *dev) { }

-#define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
-
 #endif	/* CONFIG_ARM_DMA_USE_IOMMU */

 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
 {
-	const struct dma_map_ops *dma_ops;
-
 	dev->archdata.dma_coherent = coherent;
-#ifdef CONFIG_SWIOTLB
 	dev->dma_coherent = coherent;
-#endif

 	/*
 	 * Don't override the dma_ops if they have already been set. Ideally
@@ -2280,12 +1780,8 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 	if (dev->dma_ops)
 		return;

-	if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
-		dma_ops = arm_get_iommu_dma_map_ops(coherent);
-	else
-		dma_ops = arm_get_dma_map_ops(coherent);
-
-	set_dma_ops(dev, dma_ops);
+	if (iommu)
+		arm_setup_iommu_dma_ops(dev, dma_base, size, iommu, coherent);

 	xen_setup_dma_ops(dev);
 	dev->archdata.dma_ops_setup = true;
@@ -2301,7 +1797,6 @@ void arch_teardown_dma_ops(struct device *dev)
 	set_dma_ops(dev, NULL);
 }

-#ifdef CONFIG_SWIOTLB
 void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
 {
@@ -2329,4 +1824,3 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 {
	__arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
 }
-#endif	/* CONFIG_SWIOTLB */
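For context, the driver-facing DMA API is unchanged by this consolidation: a driver keeps using the generic streaming-mapping helpers, and the choice between dma-direct and the merged iommu_ops above, as well as the cache maintenance for non-coherent devices (now gated on dev->dma_coherent inside the map/unmap/sync paths), is made behind set_dma_ops() by arch_setup_dma_ops(). A minimal sketch of such a caller follows; the foo_* names and buffer are made up for illustration and are not part of this series.

#include <linux/dma-mapping.h>

/* Hypothetical consumer: map a buffer the device will write into. */
static int foo_map_rx(struct device *dev, void *buf, size_t len,
		      dma_addr_t *dma)
{
	/*
	 * Same call whether dev is cache-coherent or not, and whether an
	 * IOMMU mapping was attached by arch_setup_dma_ops() or not.
	 */
	*dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;
	return 0;
}

static void foo_unmap_rx(struct device *dev, dma_addr_t dma, size_t len)
{
	/*
	 * On a non-coherent device this is where the CPU view of the buffer
	 * is made valid again (cache invalidate for DMA_FROM_DEVICE).
	 */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
}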