author		Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:26:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:26:14 -0400
commit		0cf744bc7ae8e0072159a901f6e1a159bbc30ffa
tree		fc8222a3a5af4f42226070c3f76462cfcf0b4e50 /arch
parent		b528392669415dc1e53a047215e5ad6c2de879fc
parent		7f8998c7aef3ac9c5f3f2943e083dfa6302e90d0
Merge branch 'akpm' (fixes from Andrew Morton)
Merge patch-bomb from Andrew Morton:
- part of OCFS2 (review is laggy again)
- procfs
- slab
- all of MM
- zram, zbud
- various other random things: arch, filesystems.
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (164 commits)
nosave: consolidate __nosave_{begin,end} in <asm/sections.h>
include/linux/screen_info.h: remove unused ORIG_* macros
kernel/sys.c: compat sysinfo syscall: fix undefined behavior
kernel/sys.c: whitespace fixes
acct: eliminate compile warning
kernel/async.c: switch to pr_foo()
include/linux/blkdev.h: use NULL instead of zero
include/linux/kernel.h: deduplicate code implementing clamp* macros
include/linux/kernel.h: rewrite min3, max3 and clamp using min and max (see the sketch after this list)
alpha: use Kbuild logic to include <asm-generic/sections.h>
frv: remove deprecated IRQF_DISABLED
frv: remove unused cpuinfo_frv and friends to fix future build error
zbud: avoid accessing last unused freelist
zsmalloc: simplify init_zspage free obj linking
mm/zsmalloc.c: correct comment for fullness group computation
zram: use notify_free to account all free notifications
zram: report maximum used memory
zram: zram memory size limitation
zsmalloc: change return value unit of zs_get_total_size_bytes
zsmalloc: move pages_allocated to zs_pool
...
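One of the commits listed above, "include/linux/kernel.h: rewrite min3, max3 and clamp using min and max", deduplicates the three-way helpers by expressing them through the two-way ones. A minimal plain-C sketch of the idea follows; note that the real kernel macros additionally do typeof-based type checking and take care around argument evaluation, while these naive macros evaluate their arguments more than once:

#include <stdio.h>

/* Naive two-way helpers; only these carry comparison logic. */
#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

/* The deduplication: three-way helpers become one-liners on top of min/max. */
#define min3(x, y, z) min(min(x, y), z)
#define max3(x, y, z) max(max(x, y), z)
#define clamp(val, lo, hi) min(max(val, lo), hi)

int main(void)
{
	printf("min3(3, 1, 2)   = %d\n", min3(3, 1, 2));    /* 1 */
	printf("max3(3, 1, 2)   = %d\n", max3(3, 1, 2));    /* 3 */
	printf("clamp(7, 0, 5)  = %d\n", clamp(7, 0, 5));   /* 5 */
	printf("clamp(-2, 0, 5) = %d\n", clamp(-2, 0, 5));  /* 0 */
	return 0;
}

The point of the rewrite is that the open-coded three-way comparisons disappear: if min()/max() are correct, min3/max3/clamp are correct by construction.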
Diffstat (limited to 'arch')
46 files changed, 401 insertions, 322 deletions
diff --git a/arch/alpha/include/asm/Kbuild b/arch/alpha/include/asm/Kbuild
index a52cbf178c3a..25b49725df07 100644
--- a/arch/alpha/include/asm/Kbuild
+++ b/arch/alpha/include/asm/Kbuild
@@ -8,4 +8,5 @@ generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += scatterlist.h
+generic-y += sections.h
 generic-y += trace_clock.h
diff --git a/arch/alpha/include/asm/sections.h b/arch/alpha/include/asm/sections.h
deleted file mode 100644
index 43b40edd6e44..000000000000
--- a/arch/alpha/include/asm/sections.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ALPHA_SECTIONS_H
-#define _ALPHA_SECTIONS_H
-
-/* nothing to see, move along */
-#include <asm-generic/sections.h>
-
-#endif
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d9d32de9628c..18f392f8b744 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -14,6 +14,7 @@ config ARM
 	select CLONE_BACKWARDS
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS if HAVE_EFFICIENT_UNALIGNED_ACCESS
+	select GENERIC_ALLOCATOR
 	select GENERIC_ATOMIC64 if (CPU_V7M || CPU_V6 || !CPU_32v6K || !AEABI)
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select GENERIC_IDLE_POLL_SETUP
@@ -61,6 +62,7 @@ config ARM
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16
@@ -1659,6 +1661,10 @@ config ARCH_SELECT_MEMORY_MODEL
 config HAVE_ARCH_PFN_VALID
 	def_bool ARCH_HAS_HOLES_MEMORYMODEL || !SPARSEMEM
 
+config HAVE_GENERIC_RCU_GUP
+	def_bool y
+	depends on ARM_LPAE
+
 config HIGHMEM
 	bool "High Memory Support"
 	depends on MMU
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index 219ac88a9542..f0279411847d 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -182,6 +182,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 #define pmd_addr_end(addr,end) (end)
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
+#define pte_special(pte)	(0)
+static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
 
 /*
  * We don't have huge page support for short descriptors, for the moment
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 06e0bc0f8b00..a31ecdad4b59 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -213,10 +213,19 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 #define pmd_isclear(pmd, val)	(!(pmd_val(pmd) & (val)))
 
 #define pmd_young(pmd)		(pmd_isset((pmd), PMD_SECT_AF))
 
+#define pte_special(pte)	(pte_isset((pte), L_PTE_SPECIAL))
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+	pte_val(pte) |= L_PTE_SPECIAL;
+	return pte;
+}
+#define	__HAVE_ARCH_PTE_SPECIAL
+
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		(pmd_isclear((pmd), L_PMD_SECT_RDONLY))
 #define pmd_dirty(pmd)		(pmd_isset((pmd), L_PMD_SECT_DIRTY))
+#define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
+#define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))
 
 #define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
 #define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
@@ -224,6 +233,12 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !pmd_table(pmd))
 #define pmd_trans_splitting(pmd) (pmd_isset((pmd), L_PMD_SECT_SPLITTING))
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp);
+#endif
 #endif
 
 #define PMD_BIT_FUNC(fn,op) \
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 01baef07cd0c..90aa4583b308 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -226,7 +226,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 #define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
 #define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
 #define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))
-#define pte_special(pte)	(0)
 
 #define pte_valid_user(pte)	\
 	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))
@@ -245,7 +244,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	unsigned long ext = 0;
 
 	if (addr < TASK_SIZE && pte_valid_user(pteval)) {
-		__sync_icache_dcache(pteval);
+		if (!pte_special(pteval))
+			__sync_icache_dcache(pteval);
 		ext |= PTE_EXT_NG;
 	}
 
@@ -264,8 +264,6 @@ PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
 PTE_BIT_FUNC(mkexec,   &= ~L_PTE_XN);
 PTE_BIT_FUNC(mknexec,   |= L_PTE_XN);
 
-static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
-
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f1a0dace3efe..3cadb726ec88 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -35,12 +35,39 @@
 
 #define MMU_GATHER_BUNDLE	8
 
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+
+struct mmu_table_batch {
+	struct rcu_head		rcu;
+	unsigned int		nr;
+	void			*tables[0];
+};
+
+#define MAX_TABLE_BATCH		\
+	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+
+#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
+#else
+#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
 /*
  * TLB handling.  This allows us to remove pages from the page
  * tables, and efficiently handle the TLB issues.
  */
 struct mmu_gather {
 	struct mm_struct	*mm;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	struct mmu_table_batch	*batch;
+	unsigned int		need_flush;
+#endif
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
 	unsigned long		start, end;
@@ -101,6 +128,9 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb_table_flush(tlb);
+#endif
 }
 
 static inline void tlb_flush_mmu_free(struct mmu_gather *tlb)
@@ -129,6 +159,10 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start
 	tlb->pages = tlb->local;
 	tlb->nr = 0;
 	__tlb_alloc_page(tlb);
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
 }
 
 static inline void
@@ -205,7 +239,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	tlb_add_flush(tlb, addr + SZ_1M);
 #endif
 
-	tlb_remove_page(tlb, pte);
+	tlb_remove_entry(tlb, pte);
 }
 
 static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
@@ -213,7 +247,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 {
 #ifdef CONFIG_ARM_LPAE
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pmdp));
+	tlb_remove_entry(tlb, virt_to_page(pmdp));
 #endif
 }
diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c
index bb8b79648643..c4cc50e58c13 100644
--- a/arch/arm/kernel/hibernate.c
+++ b/arch/arm/kernel/hibernate.c
@@ -21,8 +21,7 @@
 #include <asm/idmap.h>
 #include <asm/suspend.h>
 #include <asm/memory.h>
-
-extern const void __nosave_begin, __nosave_end;
+#include <asm/sections.h>
 
 int pfn_is_nosave(unsigned long pfn)
 {
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7a996aaa061e..c245d903927f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -12,6 +12,7 @@
 #include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <linux/genalloc.h>
 #include <linux/gfp.h>
 #include <linux/errno.h>
 #include <linux/list.h>
@@ -298,57 +299,29 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
 	const void *caller)
 {
-	struct vm_struct *area;
-	unsigned long addr;
-
 	/*
 	 * DMA allocation can be mapped to user space, so lets
 	 * set VM_USERMAP flags too.
 	 */
-	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-				  caller);
-	if (!area)
-		return NULL;
-	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(page_to_pfn(page));
-
-	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
-		vunmap((void *)addr);
-		return NULL;
-	}
-	return (void *)addr;
+	return dma_common_contiguous_remap(page, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+			prot, caller);
 }
 
 static void __dma_free_remap(void *cpu_addr, size_t size)
 {
-	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
-	struct vm_struct *area = find_vm_area(cpu_addr);
-	if (!area || (area->flags & flags) != flags) {
-		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
-		return;
-	}
-	unmap_kernel_range((unsigned long)cpu_addr, size);
-	vunmap(cpu_addr);
+	dma_common_free_remap(cpu_addr, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
+static struct gen_pool *atomic_pool;
 
-struct dma_pool {
-	size_t size;
-	spinlock_t lock;
-	unsigned long *bitmap;
-	unsigned long nr_pages;
-	void *vaddr;
-	struct page **pages;
-};
-
-static struct dma_pool atomic_pool = {
-	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
-};
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
 
 static int __init early_coherent_pool(char *p)
 {
-	atomic_pool.size = memparse(p, &p);
+	atomic_pool_size = memparse(p, &p);
 	return 0;
 }
 early_param("coherent_pool", early_coherent_pool);
@@ -358,14 +331,14 @@ void __init init_dma_coherent_pool_size(unsigned long size)
 	/*
 	 * Catch any attempt to set the pool size too late.
 	 */
-	BUG_ON(atomic_pool.vaddr);
+	BUG_ON(atomic_pool);
 
 	/*
 	 * Set architecture specific coherent pool size only if
 	 * it has not been changed by kernel command line parameter.
 	 */
-	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
-		atomic_pool.size = size;
+	if (atomic_pool_size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+		atomic_pool_size = size;
 }
 
 /*
@@ -373,52 +346,44 @@ void __init init_dma_coherent_pool_size(unsigned long size)
  */
 static int __init atomic_pool_init(void)
 {
-	struct dma_pool *pool = &atomic_pool;
 	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
 	gfp_t gfp = GFP_KERNEL | GFP_DMA;
-	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
-	unsigned long *bitmap;
 	struct page *page;
-	struct page **pages;
 	void *ptr;
-	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
 
-	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-	if (!bitmap)
-		goto no_bitmap;
-
-	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
-	if (!pages)
-		goto no_pages;
+	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+	if (!atomic_pool)
+		goto out;
 
 	if (dev_get_cma_area(NULL))
-		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
-					      atomic_pool_init);
+		ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
+					      &page, atomic_pool_init);
 	else
-		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
-					   atomic_pool_init);
+		ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
+					   &page, atomic_pool_init);
 	if (ptr) {
-		int i;
-
-		for (i = 0; i < nr_pages; i++)
-			pages[i] = page + i;
-
-		spin_lock_init(&pool->lock);
-		pool->vaddr = ptr;
-		pool->pages = pages;
-		pool->bitmap = bitmap;
-		pool->nr_pages = nr_pages;
-		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
-			(unsigned)pool->size / 1024);
+		int ret;
+
+		ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
+					page_to_phys(page),
+					atomic_pool_size, -1);
+		if (ret)
+			goto destroy_genpool;
+
+		gen_pool_set_algo(atomic_pool,
+				gen_pool_first_fit_order_align,
+				(void *)PAGE_SHIFT);
+		pr_info("DMA: preallocated %zd KiB pool for atomic coherent allocations\n",
+		       atomic_pool_size / 1024);
 		return 0;
 	}
 
-	kfree(pages);
-no_pages:
-	kfree(bitmap);
-no_bitmap:
-	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
-	       (unsigned)pool->size / 1024);
+destroy_genpool:
+	gen_pool_destroy(atomic_pool);
+	atomic_pool = NULL;
+out:
+	pr_err("DMA: failed to allocate %zx KiB pool for atomic coherent allocation\n",
+	       atomic_pool_size / 1024);
 	return -ENOMEM;
 }
 /*
@@ -522,76 +487,36 @@ static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
 
 static void *__alloc_from_pool(size_t size, struct page **ret_page)
 {
-	struct dma_pool *pool = &atomic_pool;
-	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned int pageno;
-	unsigned long flags;
+	unsigned long val;
 	void *ptr = NULL;
-	unsigned long align_mask;
 
-	if (!pool->vaddr) {
+	if (!atomic_pool) {
 		WARN(1, "coherent pool not initialised!\n");
 		return NULL;
 	}
 
-	/*
-	 * Align the region allocation - allocations from pool are rather
-	 * small, so align them to their order in pages, minimum is a page
-	 * size. This helps reduce fragmentation of the DMA space.
-	 */
-	align_mask = (1 << get_order(size)) - 1;
-
-	spin_lock_irqsave(&pool->lock, flags);
-	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
-					    0, count, align_mask);
-	if (pageno < pool->nr_pages) {
-		bitmap_set(pool->bitmap, pageno, count);
-		ptr = pool->vaddr + PAGE_SIZE * pageno;
-		*ret_page = pool->pages[pageno];
-	} else {
-		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
-			    "Please increase it with coherent_pool= kernel parameter!\n",
-			    (unsigned)pool->size / 1024);
+	val = gen_pool_alloc(atomic_pool, size);
+	if (val) {
+		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+		*ret_page = phys_to_page(phys);
+		ptr = (void *)val;
 	}
-	spin_unlock_irqrestore(&pool->lock, flags);
 
 	return ptr;
 }
 
 static bool __in_atomic_pool(void *start, size_t size)
 {
-	struct dma_pool *pool = &atomic_pool;
-	void *end = start + size;
-	void *pool_start = pool->vaddr;
-	void *pool_end = pool->vaddr + pool->size;
-
-	if (start < pool_start || start >= pool_end)
-		return false;
-
-	if (end <= pool_end)
-		return true;
-
-	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
-	     start, end - 1, pool_start, pool_end - 1);
-
-	return false;
+	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
 }
 
 static int __free_from_pool(void *start, size_t size)
 {
-	struct dma_pool *pool = &atomic_pool;
-	unsigned long pageno, count;
-	unsigned long flags;
-
 	if (!__in_atomic_pool(start, size))
 		return 0;
 
-	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
-	count = size >> PAGE_SHIFT;
-
-	spin_lock_irqsave(&pool->lock, flags);
-	bitmap_clear(pool->bitmap, pageno, count);
-	spin_unlock_irqrestore(&pool->lock, flags);
+	gen_pool_free(atomic_pool, (unsigned long)start, size);
 
 	return 1;
 }
@@ -1271,29 +1196,8 @@ static void *
 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
 		    const void *caller)
 {
-	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	struct vm_struct *area;
-	unsigned long p;
-
-	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-				  caller);
-	if (!area)
-		return NULL;
-
-	area->pages = pages;
-	area->nr_pages = nr_pages;
-	p = (unsigned long)area->addr;
-
-	for (i = 0; i < nr_pages; i++) {
-		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
-		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
-			goto err;
-		p += PAGE_SIZE;
-	}
-	return area->addr;
-err:
-	unmap_kernel_range((unsigned long)area->addr, size);
-	vunmap(area->addr);
+	return dma_common_pages_remap(pages, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
 	return NULL;
 }
@@ -1355,11 +1259,13 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
 
 static struct page **__atomic_get_pages(void *addr)
 {
-	struct dma_pool *pool = &atomic_pool;
-	struct page **pages = pool->pages;
-	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+	struct page *page;
+	phys_addr_t phys;
+
+	phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
+	page = phys_to_page(phys);
 
-	return pages + offs;
+	return (struct page **)page;
 }
 
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
@@ -1501,8 +1407,8 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	}
 
 	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
-		unmap_kernel_range((unsigned long)cpu_addr, size);
-		vunmap(cpu_addr);
+		dma_common_free_remap(cpu_addr, size,
+			VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 	}
 
 	__iommu_remove_mapping(dev, handle, size);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 43d54f5b26b9..265b836b3bd1 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -400,3 +400,18 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 	 */
 	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp)
+{
+	pmd_t pmd = pmd_mksplitting(*pmdp);
+	VM_BUG_ON(address & ~PMD_MASK);
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+
+	/* dummy IPI to serialise against fast_gup */
+	kick_all_cpus_sync();
+}
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9221645dd192..92bba32d9230 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -322,7 +322,7 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
 	 * reserve memory for DMA contigouos allocations,
 	 * must come from DMA area inside low memory
 	 */
-	dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
+	dma_contiguous_reserve(arm_dma_limit);
 
 	arm_memblock_steal_permitted = false;
 	memblock_dump_all();
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3f0e854d0ff4..c49ca4c738bb 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -18,6 +18,7 @@ config ARM64
 	select COMMON_CLK
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS
+	select GENERIC_ALLOCATOR
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST if SMP
 	select GENERIC_CPU_AUTOPROBE
@@ -56,6 +57,7 @@ config ARM64
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_RCU_TABLE_FREE
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
 	select MODULES_USE_ELF_RELA
@@ -109,6 +111,9 @@ config GENERIC_CALIBRATE_DELAY
 config ZONE_DMA
 	def_bool y
 
+config HAVE_GENERIC_RCU_GUP
+	def_bool y
+
 config ARCH_DMA_ADDR_T_64BIT
 	def_bool y
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 77dbe1e6398d..cefd3e825612 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -244,6 +244,16 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 
 #define __HAVE_ARCH_PTE_SPECIAL
 
+static inline pte_t pud_pte(pud_t pud)
+{
+	return __pte(pud_val(pud));
+}
+
+static inline pmd_t pud_pmd(pud_t pud)
+{
+	return __pmd(pud_val(pud));
+}
+
 static inline pte_t pmd_pte(pmd_t pmd)
 {
 	return __pte(pmd_val(pmd));
@@ -261,7 +271,13 @@ static inline pmd_t pte_pmd(pte_t pte)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
 #define pmd_trans_splitting(pmd)	pte_special(pmd_pte(pmd))
-#endif
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+struct vm_area_struct;
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp);
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #define pmd_young(pmd)		pte_young(pmd_pte(pmd))
 #define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
@@ -282,6 +298,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
 #define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+#define pud_write(pud)		pte_write(pud_pte(pud))
 #define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
 
 #define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
@@ -383,6 +400,8 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
 }
 
+#define pud_page(pud)		pmd_page(pud_pmd(pud))
+
 #endif	/* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
 
 #if CONFIG_ARM64_PGTABLE_LEVELS > 3
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 62731ef9749a..a82c0c5c8b52 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -23,6 +23,20 @@
 
 #include <asm-generic/tlb.h>
 
+#include <linux/pagemap.h>
+#include <linux/swap.h>
+
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+
+#define tlb_remove_entry(tlb, entry)	tlb_remove_table(tlb, entry)
+static inline void __tlb_remove_table(void *_table)
+{
+	free_page_and_swap_cache((struct page *)_table);
+}
+#else
+#define tlb_remove_entry(tlb, entry)	tlb_remove_page(tlb, entry)
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+
 /*
  * There's three ways the TLB shootdown code is used:
  *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
@@ -88,7 +102,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, pte);
+	tlb_remove_entry(tlb, pte);
 }
 
 #if CONFIG_ARM64_PGTABLE_LEVELS > 2
@@ -96,7 +110,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pmdp));
+	tlb_remove_entry(tlb, virt_to_page(pmdp));
 }
 #endif
 
@@ -105,7 +119,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp,
 				  unsigned long addr)
 {
 	tlb_add_flush(tlb, addr);
-	tlb_remove_page(tlb, virt_to_page(pudp));
+	tlb_remove_entry(tlb, virt_to_page(pudp));
 }
 #endif
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 2c71077cacfd..d92094203913 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -20,6 +20,7 @@
 #include <linux/gfp.h>
 #include <linux/export.h>
 #include <linux/slab.h>
+#include <linux/genalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 #include <linux/vmalloc.h>
@@ -38,6 +39,54 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
 	return prot;
 }
 
+static struct gen_pool *atomic_pool;
+
+#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
+
+static int __init early_coherent_pool(char *p)
+{
+	atomic_pool_size = memparse(p, &p);
+	return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+static void *__alloc_from_pool(size_t size, struct page **ret_page)
+{
+	unsigned long val;
+	void *ptr = NULL;
+
+	if (!atomic_pool) {
+		WARN(1, "coherent pool not initialised!\n");
+		return NULL;
+	}
+
+	val = gen_pool_alloc(atomic_pool, size);
+	if (val) {
+		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+		*ret_page = phys_to_page(phys);
+		ptr = (void *)val;
+	}
+
+	return ptr;
+}
+
+static bool __in_atomic_pool(void *start, size_t size)
+{
+	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+}
+
+static int __free_from_pool(void *start, size_t size)
+{
+	if (!__in_atomic_pool(start, size))
+		return 0;
+
+	gen_pool_free(atomic_pool, (unsigned long)start, size);
+
+	return 1;
+}
+
 static void *__dma_alloc_coherent(struct device *dev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags,
 				  struct dma_attrs *attrs)
@@ -50,7 +99,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
 	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
 	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
 		flags |= GFP_DMA;
-	if (IS_ENABLED(CONFIG_DMA_CMA)) {
+	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
 		struct page *page;
 
 		size = PAGE_ALIGN(size);
@@ -70,50 +119,54 @@ static void __dma_free_coherent(struct device *dev, size_t size,
 			       void *vaddr, dma_addr_t dma_handle,
 			       struct dma_attrs *attrs)
 {
+	bool freed;
+	phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+
 	if (dev == NULL) {
 		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
 		return;
 	}
 
-	if (IS_ENABLED(CONFIG_DMA_CMA)) {
-		phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
-		dma_release_from_contiguous(dev,
+	freed = dma_release_from_contiguous(dev,
 					phys_to_page(paddr),
 					size >> PAGE_SHIFT);
-	} else {
+	if (!freed)
 		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-	}
 }
 
 static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
 				     dma_addr_t *dma_handle, gfp_t flags,
 				     struct dma_attrs *attrs)
 {
-	struct page *page, **map;
+	struct page *page;
 	void *ptr, *coherent_ptr;
-	int order, i;
 
 	size = PAGE_ALIGN(size);
-	order = get_order(size);
+
+	if (!(flags & __GFP_WAIT)) {
+		struct page *page = NULL;
+		void *addr = __alloc_from_pool(size, &page);
+
+		if (addr)
+			*dma_handle = phys_to_dma(dev, page_to_phys(page));
+
+		return addr;
+
+	}
 
 	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
 	if (!ptr)
 		goto no_mem;
-	map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
-	if (!map)
-		goto no_map;
 
 	/* remove any dirty cache lines on the kernel alias */
 	__dma_flush_range(ptr, ptr + size);
 
 	/* create a coherent mapping */
 	page = virt_to_page(ptr);
-	for (i = 0; i < (size >> PAGE_SHIFT); i++)
-		map[i] = page + i;
-	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
-			    __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
-	kfree(map);
+	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+				__get_dma_pgprot(attrs,
+					__pgprot(PROT_NORMAL_NC), false),
+					NULL);
 	if (!coherent_ptr)
 		goto no_map;
 
@@ -132,6 +185,8 @@ static void __dma_free_noncoherent(struct device *dev, size_t size,
 {
 	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
 
+	if (__free_from_pool(vaddr, size))
+		return;
 	vunmap(vaddr);
 	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
 }
@@ -307,6 +362,67 @@ EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
 
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 
+static int __init atomic_pool_init(void)
+{
+	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
+	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
+	struct page *page;
+	void *addr;
+	unsigned int pool_size_order = get_order(atomic_pool_size);
+
+	if (dev_get_cma_area(NULL))
+		page = dma_alloc_from_contiguous(NULL, nr_pages,
+						 pool_size_order);
+	else
+		page = alloc_pages(GFP_DMA, pool_size_order);
+
+	if (page) {
+		int ret;
+		void *page_addr = page_address(page);
+
+		memset(page_addr, 0, atomic_pool_size);
+		__dma_flush_range(page_addr, page_addr + atomic_pool_size);
+
+		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+		if (!atomic_pool)
+			goto free_page;
+
+		addr = dma_common_contiguous_remap(page, atomic_pool_size,
+					VM_USERMAP, prot, atomic_pool_init);
+
+		if (!addr)
+			goto destroy_genpool;
+
+		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
+					page_to_phys(page),
+					atomic_pool_size, -1);
+		if (ret)
+			goto remove_mapping;
+
+		gen_pool_set_algo(atomic_pool,
+				  gen_pool_first_fit_order_align,
+				  (void *)PAGE_SHIFT);
+
+		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
+			atomic_pool_size / 1024);
+		return 0;
+	}
+	goto out;
+
+remove_mapping:
+	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+destroy_genpool:
+	gen_pool_destroy(atomic_pool);
+	atomic_pool = NULL;
+free_page:
+	if (!dma_release_from_contiguous(NULL, page, nr_pages))
+		__free_pages(page, pool_size_order);
+out:
+	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
+		atomic_pool_size / 1024);
+	return -ENOMEM;
+}
+
 static int __init swiotlb_late_init(void)
 {
 	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
@@ -315,7 +431,17 @@ static int __init swiotlb_late_init(void)
 
 	return swiotlb_late_init_with_default_size(swiotlb_size);
 }
-arch_initcall(swiotlb_late_init);
+
+static int __init arm64_dma_init(void)
+{
+	int ret = 0;
+
+	ret |= swiotlb_late_init();
+	ret |= atomic_pool_init();
+
+	return ret;
+}
+arch_initcall(arm64_dma_init);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES	4096
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 0d64089d28b5..b6f14e8d2121 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -104,3 +104,19 @@ EXPORT_SYMBOL(flush_dcache_page);
  */
 EXPORT_SYMBOL(flush_cache_all);
 EXPORT_SYMBOL(flush_icache_range);
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
+			  pmd_t *pmdp)
+{
+	pmd_t pmd = pmd_mksplitting(*pmdp);
+
+	VM_BUG_ON(address & ~PMD_MASK);
+	set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+
+	/* dummy IPI to serialise against fast_gup */
+	kick_all_cpus_sync();
+}
+#endif /* CONFIG_HAVE_RCU_TABLE_FREE */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 802b94c4ca86..2ca489eaadd3 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -15,6 +15,7 @@ generic-y += mcs_spinlock.h
 generic-y += module.h
 generic-y += preempt.h
 generic-y += scatterlist.h
+generic-y += sections.h
 generic-y += trace_clock.h
 generic-y += vga.h
 generic-y += xor.h
diff --git a/arch/cris/include/asm/sections.h b/arch/cris/include/asm/sections.h
deleted file mode 100644
index 2c998ce8967b..000000000000
--- a/arch/cris/include/asm/sections.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _CRIS_SECTIONS_H
-#define _CRIS_SECTIONS_H
-
-/* nothing to see, move along */
-#include <asm-generic/sections.h>
-
-#endif
diff --git a/arch/frv/include/asm/processor.h b/arch/frv/include/asm/processor.h
index 6554e78893f2..ae8d423e79d9 100644
--- a/arch/frv/include/asm/processor.h
+++ b/arch/frv/include/asm/processor.h
@@ -35,22 +35,6 @@ struct task_struct;
 
 /*
- * CPU type and hardware bug flags. Kept separately for each CPU.
- */
-struct cpuinfo_frv {
-#ifdef CONFIG_MMU
-	unsigned long	*pgd_quick;
-	unsigned long	*pte_quick;
-	unsigned long	pgtable_cache_sz;
-#endif
-} __cacheline_aligned;
-
-extern struct cpuinfo_frv __nongprelbss boot_cpu_data;
-
-#define cpu_data		(&boot_cpu_data)
-#define current_cpu_data	boot_cpu_data
-
-/*
  * Bus types
  */
 #define EISA_bus 0
diff --git a/arch/frv/kernel/irq-mb93091.c b/arch/frv/kernel/irq-mb93091.c
index 2cc327a1ca44..091b2839be90 100644
--- a/arch/frv/kernel/irq-mb93091.c
+++ b/arch/frv/kernel/irq-mb93091.c
@@ -107,25 +107,25 @@ static irqreturn_t fpga_interrupt(int irq, void *_mask)
 static struct irqaction fpga_irq[4]  = {
 	[0] = {
 		.handler	= fpga_interrupt,
-		.flags		= IRQF_DISABLED | IRQF_SHARED,
+		.flags		= IRQF_SHARED,
 		.name		= "fpga.0",
 		.dev_id		= (void *) 0x0028UL,
 	},
 	[1] = {
 		.handler	= fpga_interrupt,
-		.flags		= IRQF_DISABLED | IRQF_SHARED,
+		.flags		= IRQF_SHARED,
 		.name		= "fpga.1",
 		.dev_id		= (void *) 0x0050UL,
 	},
 	[2] = {
 		.handler	= fpga_interrupt,
-		.flags		= IRQF_DISABLED | IRQF_SHARED,
+		.flags		= IRQF_SHARED,
 		.name		= "fpga.2",
 		.dev_id		= (void *) 0x1c00UL,
 	},
 	[3] = {
 		.handler	= fpga_interrupt,
-		.flags		= IRQF_DISABLED | IRQF_SHARED,
+		.flags		= IRQF_SHARED,
 		.name		= "fpga.3",
 		.dev_id		= (void *) 0x6386UL,
 	}
diff --git a/arch/frv/kernel/irq-mb93093.c b/arch/frv/kernel/irq-mb93093.c
index 95e4eb4f1f38..1f3015cf80f5 100644
--- a/arch/frv/kernel/irq-mb93093.c
+++ b/arch/frv/kernel/irq-mb93093.c
@@ -105,7 +105,6 @@ static irqreturn_t fpga_interrupt(int irq, void *_mask)
 static struct irqaction fpga_irq[1]  = {
 	[0] = {
 		.handler	= fpga_interrupt,
-		.flags		= IRQF_DISABLED,
 		.name		= "fpga.0",
 		.dev_id		= (void *) 0x0700UL,
 	}
diff --git a/arch/frv/kernel/irq-mb93493.c b/arch/frv/kernel/irq-mb93493.c
index ba648da0932d..8ca5aa4ff595 100644
--- a/arch/frv/kernel/irq-mb93493.c
+++ b/arch/frv/kernel/irq-mb93493.c
@@ -118,13 +118,13 @@ static irqreturn_t mb93493_interrupt(int irq, void *_piqsr)
 static struct irqaction mb93493_irq[2]  = {
 	[0] = {
 		.handler	= mb93493_interrupt,
-		.flags		= IRQF_DISABLED | IRQF_SHARED,
+		.flags		= IRQF_SHARED,
 		.name		= "mb93493.0",
 		.dev_id		= (void *) __addr_MB93493_IQSR(0),
 	},
 	[1] = {
 		.handler	= mb93493_interrupt,
-		.flags		= IRQF_DISABLED | IRQF_SHARED,
+		.flags		= IRQF_SHARED,
 		.name		= "mb93493.1",
 		.dev_id		= (void *) __addr_MB93493_IQSR(1),
 	}
diff --git a/arch/frv/kernel/setup.c b/arch/frv/kernel/setup.c
index 9f3a7a62d787..9f4a9a607dbe 100644
--- a/arch/frv/kernel/setup.c
+++ b/arch/frv/kernel/setup.c
@@ -104,8 +104,6 @@ unsigned long __nongprelbss dma_coherent_mem_end;
 unsigned long __initdata __sdram_old_base;
 unsigned long __initdata num_mappedpages;
 
-struct cpuinfo_frv __nongprelbss boot_cpu_data;
-
 char __initdata command_line[COMMAND_LINE_SIZE];
 char __initdata redboot_command_line[COMMAND_LINE_SIZE];
diff --git a/arch/frv/kernel/time.c b/arch/frv/kernel/time.c
index b457de496b70..332e00bf9d06 100644
--- a/arch/frv/kernel/time.c
+++ b/arch/frv/kernel/time.c
@@ -44,7 +44,6 @@ static irqreturn_t timer_interrupt(int irq, void *dummy);
 
 static struct irqaction timer_irq  = {
 	.handler = timer_interrupt,
-	.flags = IRQF_DISABLED,
 	.name = "timer",
 };
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index e02448b0648b..3796801d6e0c 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -8,4 +8,5 @@ generic-y += mcs_spinlock.h
 generic-y += module.h
 generic-y += preempt.h
 generic-y += scatterlist.h
+generic-y += sections.h
 generic-y += trace_clock.h
diff --git a/arch/m32r/include/asm/sections.h b/arch/m32r/include/asm/sections.h
deleted file mode 100644
index 5e5d21c4908a..000000000000
--- a/arch/m32r/include/asm/sections.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _M32R_SECTIONS_H
-#define _M32R_SECTIONS_H
-
-/* nothing to see, move along */
-#include <asm-generic/sections.h>
-
-#endif /* _M32R_SECTIONS_H */
diff --git a/arch/m32r/kernel/time.c b/arch/m32r/kernel/time.c
index 1a15f81ea1bd..093f2761aa51 100644
--- a/arch/m32r/kernel/time.c
+++ b/arch/m32r/kernel/time.c
@@ -134,7 +134,6 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 
 static struct irqaction irq0 = {
 	.handler = timer_interrupt,
-	.flags = IRQF_DISABLED,
 	.name = "MFT2",
 };
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 3a480b3df0d6..9aa01adb407f 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -376,7 +376,6 @@ cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
 asmlinkage int
 sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
 {
-	struct vm_area_struct *vma;
 	int ret = -EINVAL;
 
 	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
@@ -389,17 +388,21 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
 		if (!capable(CAP_SYS_ADMIN))
 			goto out;
 	} else {
+		struct vm_area_struct *vma;
+
+		/* Check for overflow.  */
+		if (addr + len < addr)
+			goto out;
+
 		/*
 		 * Verify that the specified address region actually belongs
 		 * to this process.
 		 */
-		vma = find_vma (current->mm, addr);
 		ret = -EINVAL;
-		/* Check for overflow.  */
-		if (addr + len < addr)
-			goto out;
-		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
-			goto out;
+		down_read(&current->mm->mmap_sem);
+		vma = find_vma(current->mm, addr);
+		if (!vma || addr < vma->vm_start || addr + len > vma->vm_end)
+			goto out_unlock;
 	}
 
 	if (CPU_IS_020_OR_030) {
@@ -429,7 +432,7 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
 			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
 		}
 		ret = 0;
-		goto out;
+		goto out_unlock;
 	} else {
 		/*
 		 * 040 or 060: don't blindly trust 'scope', someone could
@@ -446,6 +449,8 @@ sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
 			ret = cache_flush_060 (addr, scope, cache, len);
 		}
 	}
+out_unlock:
+	up_read(&current->mm->mmap_sem);
 out:
 	return ret;
 }
diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h
deleted file mode 100644
index 3adac3b53d19..000000000000
--- a/arch/mips/include/asm/suspend.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SUSPEND_H
-#define __ASM_SUSPEND_H
-
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
-#endif /* __ASM_SUSPEND_H */
diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c
index 521e5963df05..2129e67723ff 100644
--- a/arch/mips/power/cpu.c
+++ b/arch/mips/power/cpu.c
@@ -7,7 +7,7 @@
  * Author: Hu Hongbing <huhb@lemote.com>
  *	   Wu Zhangjin <wuzhangjin@gmail.com>
  */
-#include <asm/suspend.h>
+#include <asm/sections.h>
 #include <asm/fpu.h>
 #include <asm/dsp.h>
diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild
index 77eb1a68d13b..54a062cb9f2c 100644
--- a/arch/mn10300/include/asm/Kbuild
+++ b/arch/mn10300/include/asm/Kbuild
@@ -8,4 +8,5 @@ generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += scatterlist.h
+generic-y += sections.h
 generic-y += trace_clock.h
diff --git a/arch/mn10300/include/asm/sections.h b/arch/mn10300/include/asm/sections.h
deleted file mode 100644
index 2b8c5160388f..000000000000
--- a/arch/mn10300/include/asm/sections.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/sections.h>
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index d98c1ecc3266..f60d4ea8b50c 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -38,10 +38,9 @@ static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK)
 static inline pgprot_t pte_pgprot(pte_t pte)	{ return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
 
 #ifdef CONFIG_NUMA_BALANCING
-
 static inline int pte_present(pte_t pte)
 {
-	return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA);
+	return pte_val(pte) & _PAGE_NUMA_MASK;
 }
 
 #define pte_present_nonuma pte_present_nonuma
@@ -50,37 +49,6 @@ static inline int pte_present_nonuma(pte_t pte)
 	return pte_val(pte) & (_PAGE_PRESENT);
 }
 
-#define pte_numa pte_numa
-static inline int pte_numa(pte_t pte)
-{
-	return (pte_val(pte) &
-		(_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA;
-}
-
-#define pte_mknonnuma pte_mknonnuma
-static inline pte_t pte_mknonnuma(pte_t pte)
-{
-	pte_val(pte) &= ~_PAGE_NUMA;
-	pte_val(pte) |=  _PAGE_PRESENT | _PAGE_ACCESSED;
-	return pte;
-}
-
-#define pte_mknuma pte_mknuma
-static inline pte_t pte_mknuma(pte_t pte)
-{
-	/*
-	 * We should not set _PAGE_NUMA on non present ptes. Also clear the
-	 * present bit so that hash_page will return 1 and we collect this
-	 * as numa fault.
-	 */
-	if (pte_present(pte)) {
-		pte_val(pte) |= _PAGE_NUMA;
-		pte_val(pte) &=  ~_PAGE_PRESENT;
-	} else
-		VM_BUG_ON(1);
-	return pte;
-}
-
 #define ptep_set_numa ptep_set_numa
 static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
 				 pte_t *ptep)
@@ -92,12 +60,6 @@ static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
 	return;
 }
 
-#define pmd_numa pmd_numa
-static inline int pmd_numa(pmd_t pmd)
-{
-	return pte_numa(pmd_pte(pmd));
-}
-
 #define pmdp_set_numa pmdp_set_numa
 static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
 				 pmd_t *pmdp)
@@ -109,16 +71,21 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
 	return;
 }
 
-#define pmd_mknonnuma pmd_mknonnuma
-static inline pmd_t pmd_mknonnuma(pmd_t pmd)
+/*
+ * Generic NUMA pte helpers expect pteval_t and pmdval_t types to exist
+ * which was inherited from x86. For the purposes of powerpc pte_basic_t and
+ * pmd_t are equivalent
+ */
+#define pteval_t pte_basic_t
+#define pmdval_t pmd_t
+static inline pteval_t ptenuma_flags(pte_t pte)
 {
-	return pte_pmd(pte_mknonnuma(pmd_pte(pmd)));
+	return pte_val(pte) & _PAGE_NUMA_MASK;
 }
 
-#define pmd_mknuma pmd_mknuma
-static inline pmd_t pmd_mknuma(pmd_t pmd)
+static inline pmdval_t pmdnuma_flags(pmd_t pmd)
 {
-	return pte_pmd(pte_mknuma(pmd_pte(pmd)));
+	return pmd_val(pmd) & _PAGE_NUMA_MASK;
 }
 
 # else
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h
index 8d1569c29042..e040c3595129 100644
--- a/arch/powerpc/include/asm/pte-common.h
+++ b/arch/powerpc/include/asm/pte-common.h
@@ -98,6 +98,11 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void);
 			 _PAGE_USER | _PAGE_ACCESSED | \
 			 _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC)
 
+#ifdef CONFIG_NUMA_BALANCING
+/* Mask of bits that distinguish present and numa ptes */
+#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PRESENT)
+#endif
+
 /*
  * We define 2 sets of base prot bits, one for basic pages (ie,
  * cacheable kernel and user pages) and one for non cacheable
diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c
index 0167d53da30c..a531154cc0f3 100644
--- a/arch/powerpc/kernel/suspend.c
+++ b/arch/powerpc/kernel/suspend.c
@@ -9,9 +9,7 @@
 
 #include <linux/mm.h>
 #include <asm/page.h>
-
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
+#include <asm/sections.h>
 
 /*
  * pfn_is_nosave - check if given pfn is in the 'nosave' section
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c
index a7a7537ce1e7..1c4c5accd220 100644
--- a/arch/s390/kernel/suspend.c
+++ b/arch/s390/kernel/suspend.c
@@ -13,14 +13,10 @@
 #include <asm/ipl.h>
 #include <asm/cio.h>
 #include <asm/pci.h>
+#include <asm/sections.h>
 #include "entry.h"
 
 /*
- * References to section boundaries
- */
-extern const void __nosave_begin, __nosave_end;
-
-/*
  * The restore of the saved pages in an hibernation image will set
  * the change and referenced bits in the storage key for each page.
  * Overindication of the referenced bits after an hibernation cycle
diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild
index 3fe5681744f1..46461c19f284 100644
--- a/arch/score/include/asm/Kbuild
+++ b/arch/score/include/asm/Kbuild
@@ -10,6 +10,7 @@ generic-y += irq_work.h
 generic-y += mcs_spinlock.h
 generic-y += preempt.h
 generic-y += scatterlist.h
+generic-y += sections.h
 generic-y += trace_clock.h
 generic-y += xor.h
 generic-y += serial.h
diff --git a/arch/score/include/asm/sections.h b/arch/score/include/asm/sections.h
deleted file mode 100644
index 9441d23af005..000000000000
--- a/arch/score/include/asm/sections.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_SCORE_SECTIONS_H
-#define _ASM_SCORE_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-#endif /* _ASM_SCORE_SECTIONS_H */
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index 1b6199740e98..7a99e6af6372 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -3,7 +3,6 @@
 
 #include <asm-generic/sections.h>
 
-extern long __nosave_begin, __nosave_end;
 extern long __machvec_start, __machvec_end;
 extern char __uncached_start, __uncached_end;
 extern char __start_eh_frame[], __stop_eh_frame[];
diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
index 42b0b8ce699a..17bd2e167e07 100644
--- a/arch/sparc/power/hibernate.c
+++ b/arch/sparc/power/hibernate.c
@@ -9,11 +9,9 @@
 #include <asm/hibernate.h>
 #include <asm/visasm.h>
 #include <asm/page.h>
+#include <asm/sections.h>
 #include <asm/tlb.h>
 
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
 struct saved_context saved_context;
 
 /*
diff --git a/arch/unicore32/include/mach/pm.h b/arch/unicore32/include/mach/pm.h
index 4dcd34ae194c..77b522694e74 100644
--- a/arch/unicore32/include/mach/pm.h
+++ b/arch/unicore32/include/mach/pm.h
@@ -36,8 +36,5 @@ extern int puv3_pm_enter(suspend_state_t state);
 /* Defined in hibernate_asm.S */
 extern int restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist);
 
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
 extern struct pbe *restore_pblist;
 #endif
diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c
index d75ef8b6cb56..9969ec374abb 100644
--- a/arch/unicore32/kernel/hibernate.c
+++ b/arch/unicore32/kernel/hibernate.c
@@ -18,6 +18,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/sections.h>
 #include <asm/suspend.h>
 
 #include "mach/pm.h"
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e4b1f431c7ed..3eb8a41509b3 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -30,7 +30,6 @@ config X86
 	select HAVE_UNSTABLE_SCHED_CLOCK
 	select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
 	select ARCH_SUPPORTS_INT128 if X86_64
-	select ARCH_WANTS_PROT_NUMA_PROT_NONE
 	select HAVE_IDE
 	select HAVE_OPROFILE
 	select HAVE_PCSPKR_PLATFORM
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index f216963760e5..0f9724c9c510 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -325,6 +325,20 @@ static inline pteval_t pte_flags(pte_t pte)
 	return native_pte_val(pte) & PTE_FLAGS_MASK;
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+/* Set of bits that distinguishes present, prot_none and numa ptes */
+#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)
+static inline pteval_t ptenuma_flags(pte_t pte)
+{
+	return pte_flags(pte) & _PAGE_NUMA_MASK;
+}
+
+static inline pmdval_t pmdnuma_flags(pmd_t pmd)
+{
+	return pmd_flags(pmd) & _PAGE_NUMA_MASK;
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 #define pgprot_val(x)	((x).pgprot)
 #define __pgprot(x)	((pgprot_t) { (x) } )
diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c
index 7d28c885d238..291226b952a9 100644
--- a/arch/x86/power/hibernate_32.c
+++ b/arch/x86/power/hibernate_32.c
@@ -13,13 +13,11 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mmzone.h>
+#include <asm/sections.h>
 
 /* Defined in hibernate_asm_32.S */
 extern int restore_image(void);
 
-/* References to section boundaries */
-extern const void __nosave_begin, __nosave_end;
-
 /* Pointer to the temporary resume page tables */
 pgd_t *resume_pg_dir;
diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c
index 35e2bb6c0f37..009947d419a6 100644
--- a/arch/x86/power/hibernate_64.c
+++ b/arch/x86/power/hibernate_64.c
@@ -17,11 +17,9 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mtrr.h>
+#include <asm/sections.h>
 #include <asm/suspend.h>
 
-/* References to section boundaries */
-extern __visible const void __nosave_begin, __nosave_end;
-
-/* Defined in hibernate_asm_64.S */
+/* Defined in hibernate_asm_64.S */
 extern asmlinkage __visible int restore_image(void);
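
For reference, the ARM and arm64 dma-mapping.c changes above replace a hand-rolled bitmap allocator for the atomic coherent pool with the genalloc API (hence the new "select GENERIC_ALLOCATOR" Kconfig entries). Below is a condensed kernel-side sketch of that pattern, using only genalloc calls that appear in the diff; the helper names pool_setup/pool_alloc/pool_free are hypothetical, and error handling beyond the minimum is omitted. It is a sketch, not the full driver code.

#include <linux/genalloc.h>
#include <linux/errno.h>
#include <linux/mm.h>

/* The per-arch struct dma_pool with its own bitmap and spinlock
 * collapses into a single gen_pool pointer. */
static struct gen_pool *atomic_pool;

static int pool_setup(void *vaddr, phys_addr_t phys, size_t size)
{
	/* PAGE_SHIFT allocation granularity, no NUMA node restriction (-1) */
	atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!atomic_pool)
		return -ENOMEM;

	/* register the backing region together with its virt<->phys mapping */
	if (gen_pool_add_virt(atomic_pool, (unsigned long)vaddr, phys,
			      size, -1)) {
		gen_pool_destroy(atomic_pool);
		atomic_pool = NULL;
		return -ENOMEM;
	}

	/* order-aligned first-fit reproduces the old bitmap allocator's
	 * alignment policy and limits fragmentation of the pool */
	gen_pool_set_algo(atomic_pool, gen_pool_first_fit_order_align,
			  (void *)PAGE_SHIFT);
	return 0;
}

static void *pool_alloc(size_t size, phys_addr_t *phys)
{
	unsigned long val = gen_pool_alloc(atomic_pool, size);

	if (!val)
		return NULL;
	*phys = gen_pool_virt_to_phys(atomic_pool, val);
	return (void *)val;
}

static void pool_free(void *vaddr, size_t size)
{
	/* addr_in_gen_pool() replaces the old open-coded range check */
	if (addr_in_gen_pool(atomic_pool, (unsigned long)vaddr, size))
		gen_pool_free(atomic_pool, (unsigned long)vaddr, size);
}

One design point visible in the diff: because the pool tracks the virt/phys correspondence itself (gen_pool_add_virt/gen_pool_virt_to_phys), the callers no longer need the pages[] array or the manual offset arithmetic that the old struct dma_pool carried around.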