author		Christoph Hellwig <hch@lst.de>		2021-06-23 14:21:16 +0200
committer	Christoph Hellwig <hch@lst.de>		2021-08-18 16:24:09 +0200
commit		faf4ef823ac5f3b6a34a73b76c52895dee3dce55 (patch)
tree		81396c9f70e4d8b35b9be650a427c86ff1dc3433 /kernel/dma/direct.c
parent		2a047e0662aee1bd773e0415accd785ad26a9398 (diff)
dma-direct: add support for dma_coherent_default_memory
Add an option to allocate uncached memory for dma_alloc_coherent from
the global dma_coherent_default_memory. This will allow moving
arm-nommu (and eventually other platforms) over to the generic code for
allocating uncached memory from a pre-populated pool.
Note that, for now, this is a separate pool from the one that platforms
capable of remapping at runtime use for GFP_ATOMIC allocations, although
the two use cases might eventually converge on a common codebase.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Dillon Min <dillon.minfei@gmail.com>
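The driver-facing side is unchanged by this patch: consumers keep calling
the generic DMA API and dma_direct_alloc() picks the backing store. As a
rough sketch (not part of the commit; the function name, device, and
buffer size below are made up for illustration), an allocation like the
following would now be served from the global pool on a non-coherent
device when CONFIG_DMA_GLOBAL_POOL is enabled:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * Hypothetical consumer: nothing here is specific to the global pool.
 * On a non-cache-coherent device with CONFIG_DMA_GLOBAL_POOL=y, the
 * buffer comes out of dma_coherent_default_memory after this patch.
 */
static int example_setup_buffer(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, use cpu_addr from the CPU ... */

	dma_free_coherent(dev, SZ_4K, cpu_addr, dma_handle);
	return 0;
}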
Diffstat (limited to 'kernel/dma/direct.c')
-rw-r--r--	kernel/dma/direct.c	15
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index f33ceb68aef2..8dca4f97d12d 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -156,9 +156,14 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
 	    !dev_is_dma_coherent(dev))
 		return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
 
+	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+	    !dev_is_dma_coherent(dev))
+		return dma_alloc_from_global_coherent(dev, size, dma_handle);
+
 	/*
 	 * Remapping or decrypting memory may block. If either is required and
 	 * we can't block, allocate the memory from the atomic pools.
@@ -255,11 +260,19 @@ void dma_direct_free(struct device *dev, size_t size,
 
 	if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
 	    !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
+	    !IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
 	    !dev_is_dma_coherent(dev)) {
 		arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);
 		return;
 	}
 
+	if (IS_ENABLED(CONFIG_DMA_GLOBAL_POOL) &&
+	    !dev_is_dma_coherent(dev)) {
+		if (!dma_release_from_global_coherent(page_order, cpu_addr))
+			WARN_ON_ONCE(1);
+		return;
+	}
+
 	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
 	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL) &&
 	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
@@ -462,6 +475,8 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
+	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
+		return ret;
 
 	if (vma->vm_pgoff >= count ||
 	    user_count > count - vma->vm_pgoff)
 		return -ENXIO;
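For the new branches above to fire, dma_coherent_default_memory must be
populated at boot; on arm-nommu this comes from a "shared-dma-pool"
reserved-memory node carrying the "linux,dma-default" property. A minimal
platform-side sketch, assuming the dma_init_global_coherent() helper from
the same series and a made-up base address and size, might look like:

#include <linux/dma-map-ops.h>
#include <linux/init.h>
#include <linux/sizes.h>

/*
 * Hypothetical early-boot hook: seed the global coherent pool with
 * 1 MiB of memory the platform guarantees to be uncached. The base
 * address is an example only.
 */
static int __init example_init_dma_pool(void)
{
	return dma_init_global_coherent(0x90000000, SZ_1M);
}

dma_release_from_global_coherent() and dma_mmap_from_global_coherent() in
the free and mmap hunks operate on this same pool, which is why all three
paths in the diff mirror each other.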