author     Linus Torvalds <torvalds@linux-foundation.org>   2019-07-20 12:09:52 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-07-20 12:09:52 -0700
commit     ac60602a6d8f6830dee89f4b87ee005f62eb7171
tree       ea8810e0d7abc82755c8db00904015ecbf99a8b4 /include
parent     c6dd78fcb8eefa15dd861889e0f59d301cb5230c
parent     449fa54d6815be8c2c1f68fa9dbbae9384a7c03e
Merge tag 'dma-mapping-5.3-1' of git://git.infradead.org/users/hch/dma-mapping
Pull dma-mapping fixes from Christoph Hellwig:
"Fix various regressions:
- force unencrypted dma-coherent buffers if encryption bit can't fit
into the dma coherent mask (Tom Lendacky)
- avoid limiting request size if swiotlb is not used (me)
- fix swiotlb handling in dma_direct_sync_sg_for_cpu/device (Fugang
Duan)"
* tag 'dma-mapping-5.3-1' of git://git.infradead.org/users/hch/dma-mapping:
dma-direct: correct the physical addr in dma_direct_sync_sg_for_cpu/device
dma-direct: only limit the mapping size if swiotlb could be used
dma-mapping: add a dma_addressing_limited helper
dma-direct: Force unencrypted DMA under SME for certain DMA masks
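
The dma_direct_sync_sg_for_cpu/device fix in the shortlog above amounts to deriving the physical address from each scatterlist entry's DMA address rather than from sg_phys(): with swiotlb active, the two differ, and it is the bounce buffer that must be synced. The loop below is a sketch of that idea, assuming 5.3-era internals (dma_to_phys(), is_swiotlb_buffer(), swiotlb_tbl_sync_single()); it is not the commit's verbatim body.

#include <linux/dma-direct.h>
#include <linux/scatterlist.h>
#include <linux/swiotlb.h>

static void sketch_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		/*
		 * Use the DMA address, not sg_phys(sg): under swiotlb the
		 * DMA address points at the bounce buffer, which is the
		 * copy that needs syncing.
		 */
		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));

		if (unlikely(is_swiotlb_buffer(paddr)))
			swiotlb_tbl_sync_single(dev, paddr, sg->length,
					dir, SYNC_FOR_CPU);
	}
}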
Diffstat (limited to 'include')
-rw-r--r--  include/linux/dma-direct.h   |  9 +++++++++
-rw-r--r--  include/linux/dma-mapping.h  | 14 ++++++++++++++
2 files changed, 23 insertions(+), 0 deletions(-)
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index b7338702592a..adf993a3bd58 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -32,6 +32,15 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
 
+#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
+bool force_dma_unencrypted(struct device *dev);
+#else
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+	return false;
+}
+#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+
 /*
  * If memory encryption is supported, phys_to_dma will set the memory encryption
  * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 8d13e28a8e07..e11b115dd0e4 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -679,6 +679,20 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
 	return dma_set_mask_and_coherent(dev, mask);
 }
 
+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev:	device to check
+ *
+ * Return %true if the devices DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+static inline bool dma_addressing_limited(struct device *dev)
+{
+	return min_not_zero(*dev->dma_mask, dev->bus_dma_mask) <
+			    dma_get_required_mask(dev);
+}
+
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		const struct iommu_ops *iommu, bool coherent);
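
For context on the dma_addressing_limited() helper added above: it reports whether the device's effective DMA mask (the smaller of its streaming mask and any bus mask) falls short of the mask needed to reach all installed memory, which is the usual trigger for bounce buffering. A hypothetical driver-side use might look like the sketch below; mydev_setup_dma() is an invented name.

#include <linux/device.h>
#include <linux/dma-mapping.h>

static int mydev_setup_dma(struct device *dev)
{
	int ret;

	/* A deliberately narrow mask, to make the helper's point. */
	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* True when the mask cannot cover all memory in the system. */
	if (dma_addressing_limited(dev))
		dev_info(dev, "addressing limited; bounce buffering likely\n");

	return 0;
}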