author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-23 11:13:56 -0800
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-01-23 11:13:56 -0800
commit | 20c759ca98468d96d1fff8bd5e6753f458dbbfbd (patch) |
tree | 202bb6951fcb6857128d96154bcde1bddbb22316 /arch |
parent | b82dde0230439215b55e545880e90337ee16f51a (diff) |
parent | 114bf37e04d839b555b3dc460b5e6ce156f49cf0 (diff) |
Merge branch 'akpm' (patches from Andrew)
Merge small final update from Andrew Morton:
- DAX feature work: add fsync/msync support
- kfree cleanup, MAINTAINERS update
* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
MAINTAINERS: return arch/sh to maintained state, with new maintainers
tree wide: use kvfree() rather than conditional kfree()/vfree()
dax: never rely on bh.b_dev being set by get_block()
xfs: call dax_pfn_mkwrite() for DAX fsync/msync
ext4: call dax_pfn_mkwrite() for DAX fsync/msync
ext2: call dax_pfn_mkwrite() for DAX fsync/msync
dax: add support for fsync/msync
mm: add find_get_entries_tag()
dax: support dirty DAX entries in radix tree
pmem: add wb_cache_pmem() to the PMEM API
dax: fix conversion of holes to PMDs
dax: fix NULL pointer dereference in __dax_dbg()
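The headline feature in this pull is DAX fsync/msync support: before this series, stores made through a DAX mmap could linger in CPU cache with the kernel keeping no record of which mappings had been dirtied, so msync()/fsync() could return without the data reaching persistent media. What the series makes hold is the ordinary POSIX durability contract, as in the minimal userspace sketch below; /mnt/pmem/data is a hypothetical file on a filesystem mounted with -o dax.

```c
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 4096;
	/* Hypothetical path: a file on a filesystem mounted with -o dax. */
	int fd = open("/mnt/pmem/data", O_CREAT | O_RDWR, 0644);
	if (fd < 0 || ftruncate(fd, len) < 0) {
		perror("open/ftruncate");
		return EXIT_FAILURE;
	}

	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	/* With DAX, this store lands directly in pmem-backed pages... */
	strcpy(p, "hello, persistent world");

	/*
	 * ...and msync() must push it through the CPU cache to media.
	 * Making this call meaningful for DAX mappings is what the
	 * dax fsync/msync series in this merge adds.
	 */
	if (msync(p, len, MS_SYNC) < 0) {
		perror("msync");
		return EXIT_FAILURE;
	}

	munmap(p, len);
	close(fd);
	return EXIT_SUCCESS;
}
```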
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 11 |
-rw-r--r-- | arch/x86/include/asm/pmem.h | 11 |
2 files changed, 8 insertions, 14 deletions
```diff
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 534a60ae282e..0eca3812527e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1200,10 +1200,7 @@ error:
 	while (i--)
 		if (pages[i])
 			__free_pages(pages[i], 0);
-	if (array_size <= PAGE_SIZE)
-		kfree(pages);
-	else
-		vfree(pages);
+	kvfree(pages);
 	return NULL;
 }
 
@@ -1211,7 +1208,6 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages,
 	       size_t size, struct dma_attrs *attrs)
 {
 	int count = size >> PAGE_SHIFT;
-	int array_size = count * sizeof(struct page *);
 	int i;
 
 	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
@@ -1222,10 +1218,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages,
 				__free_pages(pages[i], 0);
 	}
 
-	if (array_size <= PAGE_SIZE)
-		kfree(pages);
-	else
-		vfree(pages);
+	kvfree(pages);
 	return 0;
 }
 
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index 1544fabcd7f9..c57fd1ea9689 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -67,18 +67,19 @@ static inline void arch_wmb_pmem(void)
 }
 
 /**
- * __arch_wb_cache_pmem - write back a cache range with CLWB
+ * arch_wb_cache_pmem - write back a cache range with CLWB
  * @vaddr:	virtual start address
  * @size:	number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
  * instruction. This function requires explicit ordering with an
- * arch_wmb_pmem() call. This API is internal to the x86 PMEM implementation.
+ * arch_wmb_pmem() call.
  */
-static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
+static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
 {
 	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
 	unsigned long clflush_mask = x86_clflush_size - 1;
+	void *vaddr = (void __force *)addr;
 	void *vend = vaddr + size;
 	void *p;
 
@@ -115,7 +116,7 @@ static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
 	len = copy_from_iter_nocache(vaddr, bytes, i);
 
 	if (__iter_needs_pmem_wb(i))
-		__arch_wb_cache_pmem(vaddr, bytes);
+		arch_wb_cache_pmem(addr, bytes);
 
 	return len;
 }
@@ -133,7 +134,7 @@ static inline void arch_clear_pmem(void __pmem *addr, size_t size)
 	void *vaddr = (void __force *)addr;
 
 	memset(vaddr, 0, size);
-	__arch_wb_cache_pmem(vaddr, size);
+	arch_wb_cache_pmem(addr, size);
 }
 
 static inline bool __arch_has_wmb_pmem(void)
```
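The dma-mapping.c hunks are one instance of the tree-wide kvfree() cleanup: the allocation side picks kmalloc() or vmalloc() for the pages[] array depending on its size, so the free side previously had to repeat that size check, and the array_size bookkeeping existed only to feed it. kvfree() (declared in <linux/mm.h>) tells the two cases apart from the address itself. A minimal sketch of the pattern; the alloc_array()/free_array() helpers are illustrative, not from the patch:

```c
#include <linux/mm.h>		/* kvfree() */
#include <linux/slab.h>		/* kmalloc() */
#include <linux/vmalloc.h>	/* vmalloc() */

/* Illustrative helper, not from the patch: small arrays come from
 * the slab, large ones from vmalloc space. */
static void *alloc_array(size_t array_size)
{
	if (array_size <= PAGE_SIZE)
		return kmalloc(array_size, GFP_KERNEL);
	return vmalloc(array_size);
}

static void free_array(void *arr)
{
	/*
	 * Old form: callers re-derived array_size just to choose between
	 * kfree() and vfree(). kvfree() distinguishes the two kinds of
	 * allocation by address, so one call frees either.
	 */
	kvfree(arr);
}
```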
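The pmem.h hunk promotes the CLWB helper from an x86-internal function to arch_wb_cache_pmem(), now taking a void __pmem * so the new wb_cache_pmem() PMEM API can drive it from the DAX fsync path. The hunk shows only the function's prologue; the write-back loop that follows is elided. The sketch below reconstructs that loop under the assumption that it matches the visible variables: round the start address down to a cache-line boundary, then issue CLWB once per line. wb_cache_range() is an illustrative stand-in name; clwb() and boot_cpu_data are the real x86 kernel interfaces.

```c
#include <linux/types.h>	/* u16 */
#include <asm/processor.h>	/* boot_cpu_data */
#include <asm/special_insns.h>	/* clwb() */

/* Illustrative stand-in for the loop body elided in the hunk above. */
static inline void wb_cache_range(void *vaddr, size_t size)
{
	u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
	unsigned long clflush_mask = x86_clflush_size - 1;
	void *vend = vaddr + size;
	void *p;

	/*
	 * Start at the first cache line covering the range and write back
	 * one line at a time. Unlike CLFLUSH, CLWB leaves the line valid
	 * in cache, so subsequent reads stay cheap; ordering against the
	 * store fence is left to arch_wmb_pmem(), as the kernel-doc in
	 * the hunk states.
	 */
	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
	     p < vend; p += x86_clflush_size)
		clwb(p);
}
```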