author    | Linus Torvalds <torvalds@linux-foundation.org> | 2019-12-02 11:05:00 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2019-12-02 11:05:00 -0800
commit    | 1daa56bcfd8b329447e0c1b1e91c3925d08489b7 (patch)
tree      | 1adc9bc2f832de5761348eb0808daeec6bd97adc /drivers/iommu/dma-iommu.c
parent    | a5255bc31673c72e264d837cd13cd3085d72cb58 (diff)
parent    | 9b3a713feef8db41d4bcccb3b97e86ee906690c8 (diff)
Merge tag 'iommu-updates-v5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull iommu updates from Joerg Roedel:
- Conversion of the AMD IOMMU driver to use the dma-iommu code for
implementing the DMA-API. This removes a fair amount of code from the
driver itself, but also has some potential for regressions (none are
known at the moment).
- Support for the Qualcomm SMMUv2 implementation in the SDM845 SoC.
This also includes some firmware interface changes, but those are
acked by the respective maintainers.
- Preparatory work to support two distinct page-tables per domain in
the ARM-SMMU driver
- Power management improvements for the ARM SMMUv2
- Custom PASID allocator support
- Multiple PCI DMA alias support for the AMD IOMMU driver
- Adaptation of the Mediatek driver to the changed IO/TLB flush interface
of the IOMMU core code (a sketch of the gather-based interface follows
this list).
- Preparatory patches for the Renesas IOMMU driver to support future
hardware.
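
For context on the Mediatek item above: since the core's flush rework, unmap
paths accumulate the affected IOVA range in a struct iommu_iotlb_gather and the
driver invalidates once at sync time, instead of flushing per page. Below is a
minimal sketch of the driver side, NOT the actual Mediatek code; the
iommu_iotlb_gather structure and the .unmap/.iotlb_sync callbacks are the real
core interface, while the hyp_* names are hypothetical stand-ins.

#include <linux/iommu.h>

/* Stand-in for the device-specific range invalidation (hypothetical). */
static void hyp_hw_flush_range(struct iommu_domain *domain,
			       unsigned long start, unsigned long end)
{
	/* poke the hardware's invalidation registers here */
}

static size_t hyp_unmap(struct iommu_domain *domain, unsigned long iova,
			size_t size, struct iommu_iotlb_gather *gather)
{
	/*
	 * Tear down the page-table entries but do not flush the TLB yet:
	 * just record the affected range so a single invalidation can
	 * cover the whole unmap batch.
	 */
	iommu_iotlb_gather_add_page(domain, gather, iova, size);
	return size;
}

static void hyp_iotlb_sync(struct iommu_domain *domain,
			   struct iommu_iotlb_gather *gather)
{
	/* One ranged flush for everything gathered since the last sync. */
	hyp_hw_flush_range(domain, gather->start, gather->end);
}

The point of the design is that a large dma_unmap_sg() ends in one ranged
invalidation rather than a flush per page, which is what motivated the Mediatek
rework in the commits listed below.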
* tag 'iommu-updates-v5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (62 commits)
iommu/rockchip: Don't provoke WARN for harmless IRQs
iommu/vt-d: Turn off translations at shutdown
iommu/vt-d: Check VT-d RMRR region in BIOS is reported as reserved
iommu/arm-smmu: Remove duplicate error message
iommu/arm-smmu-v3: Don't display an error when IRQ lines are missing
iommu/ipmmu-vmsa: Add utlb_offset_base
iommu/ipmmu-vmsa: Add helper functions for "uTLB" registers
iommu/ipmmu-vmsa: Calculate context registers' offset instead of a macro
iommu/ipmmu-vmsa: Add helper functions for MMU "context" registers
iommu/ipmmu-vmsa: tidyup register definitions
iommu/ipmmu-vmsa: Remove all unused register definitions
iommu/mediatek: Reduce the tlb flush timeout value
iommu/mediatek: Get rid of the pgtlock
iommu/mediatek: Move the tlb_sync into tlb_flush
iommu/mediatek: Delete the leaf in the tlb_flush
iommu/mediatek: Use gather to achieve the tlb range flush
iommu/mediatek: Add a new tlb_lock for tlb_flush
iommu/mediatek: Correct the flush_iotlb_all callback
iommu/io-pgtable-arm: Rename IOMMU_QCOM_SYS_CACHE and improve doc
iommu/io-pgtable-arm: Rationalise MAIR handling
...
Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r-- | drivers/iommu/dma-iommu.c | 43
1 file changed, 35 insertions, 8 deletions
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 646332fbf3d7..0cc702a70a96 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -22,6 +22,7 @@
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/vmalloc.h>
+#include <linux/crash_dump.h>
 
 struct iommu_dma_msi_page {
 	struct list_head	list;
@@ -353,6 +354,21 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	return iova_reserve_iommu_regions(dev, domain);
 }
 
+static int iommu_dma_deferred_attach(struct device *dev,
+		struct iommu_domain *domain)
+{
+	const struct iommu_ops *ops = domain->ops;
+
+	if (!is_kdump_kernel())
+		return 0;
+
+	if (unlikely(ops->is_attach_deferred &&
+			ops->is_attach_deferred(domain, dev)))
+		return iommu_attach_device(domain, dev);
+
+	return 0;
+}
+
 /**
  * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
  *                    page flags.
@@ -461,7 +477,7 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-		size_t size, int prot)
+		size_t size, int prot, dma_addr_t dma_mask)
 {
 	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
@@ -469,13 +485,16 @@ static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
 	size_t iova_off = iova_offset(iovad, phys);
 	dma_addr_t iova;
 
+	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+		return DMA_MAPPING_ERROR;
+
 	size = iova_align(iovad, size + iova_off);
 
-	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
+	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
 	if (!iova)
 		return DMA_MAPPING_ERROR;
 
-	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
+	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
 		iommu_dma_free_iova(cookie, iova, size);
 		return DMA_MAPPING_ERROR;
 	}
@@ -578,6 +597,9 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 
 	*dma_handle = DMA_MAPPING_ERROR;
 
+	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+		return NULL;
+
 	min_size = alloc_sizes & -alloc_sizes;
 	if (min_size < PAGE_SIZE) {
 		min_size = PAGE_SIZE;
@@ -610,7 +632,7 @@ static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
 			arch_dma_prep_coherent(sg_page(sg), sg->length);
 	}
 
-	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
+	if (iommu_map_sg_atomic(domain, iova, sgt.sgl, sgt.orig_nents, ioprot)
 			< size)
 		goto out_free_sg;
 
@@ -710,7 +732,7 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 	int prot = dma_info_to_prot(dir, coherent, attrs);
 	dma_addr_t dma_handle;
 
-	dma_handle =__iommu_dma_map(dev, phys, size, prot);
+	dma_handle = __iommu_dma_map(dev, phys, size, prot, dma_get_mask(dev));
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    dma_handle != DMA_MAPPING_ERROR)
 		arch_sync_dma_for_device(phys, size, dir);
@@ -820,6 +842,9 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	unsigned long mask = dma_get_seg_boundary(dev);
 	int i;
 
+	if (unlikely(iommu_dma_deferred_attach(dev, domain)))
+		return 0;
+
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);
 
@@ -870,7 +895,7 @@ static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	 * We'll leave any physical concatenation to the IOMMU driver's
 	 * implementation - it knows better than we do.
 	 */
-	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
+	if (iommu_map_sg_atomic(domain, iova, sg, nents, prot) < iova_len)
 		goto out_free_iova;
 
 	return __finalise_sg(dev, sg, nents, iova);
@@ -910,7 +935,8 @@ static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	return __iommu_dma_map(dev, phys, size,
-			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
+			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
+			dma_get_mask(dev));
 }
 
 static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
@@ -1016,7 +1042,8 @@ static void *iommu_dma_alloc(struct device *dev, size_t size,
 	if (!cpu_addr)
 		return NULL;
 
-	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot);
+	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
+			dev->coherent_dma_mask);
 	if (*handle == DMA_MAPPING_ERROR) {
 		__iommu_dma_free(dev, size, cpu_addr);
 		return NULL;
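
What the diff above does, in short: in a kdump (crash) kernel the device may
still be attached to the old kernel's IOMMU domain, so attachment is deferred
and performed on the first mapping via the new iommu_dma_deferred_attach()
helper; the map calls switch to the iommu_map_atomic()/iommu_map_sg_atomic()
variants so those paths are safe in atomic context; and __iommu_dma_map() now
takes the DMA mask as an explicit parameter so coherent allocations can honor
dev->coherent_dma_mask instead of always using dma_get_mask(dev). The sketch
below shows, from a hypothetical driver's perspective, where the deferred
attach fires; the hyp_* names are invented, the DMA-API calls are real.

#include <linux/dma-mapping.h>

static int hyp_start_dma(struct device *hyp_dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/*
	 * In a kdump kernel (is_kdump_kernel() == true), the first map
	 * operation on this device now runs iommu_dma_deferred_attach(),
	 * attaching the device to the new kernel's domain before an IOVA
	 * is allocated; if that attach fails, the mapping reports
	 * DMA_MAPPING_ERROR like any other failure.
	 */
	handle = dma_map_single(hyp_dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(hyp_dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle', wait for completion ... */

	dma_unmap_single(hyp_dev, handle, len, DMA_TO_DEVICE);
	return 0;
}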