Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c        |  2 +-
-rw-r--r--  mm/hugetlb.c            |  5 +++--
-rw-r--r--  mm/kasan/kasan.h        | 12 ++++++++++++
-rw-r--r--  mm/kfence/core.c        | 19 ++++++++++++++++---
-rw-r--r--  mm/kfence/kfence_test.c |  2 +-
-rw-r--r--  mm/memblock.c           |  3 ++-
-rw-r--r--  mm/memory.c             | 11 ++++++++++-
-rw-r--r--  mm/migrate.c            | 48 ------------------------------------------------
-rw-r--r--  mm/mmap_lock.c          |  4 ++--
-rw-r--r--  mm/page_alloc.c         | 57 +++++++++++++++++++++++++++++++++++---------------------
-rw-r--r--  mm/rmap.c               | 39 ++++++++++++++++++++++-----------------
-rw-r--r--  mm/secretmem.c          |  1 +
-rw-r--r--  mm/slab.h               | 15 +++++++++++----
-rw-r--r--  mm/slub.c               | 93 ++++++++++++++++++++---------------------------------------------------------------
-rw-r--r--  mm/util.c               | 10 ++++++++++
15 files changed, 158 insertions(+), 163 deletions(-)
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 271f2ca862c8..f5561ea7d90a 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -398,12 +398,12 @@ static void cgwb_release_workfn(struct work_struct *work)
 	blkcg_unpin_online(blkcg);
 
 	fprop_local_destroy_percpu(&wb->memcg_completions);
 
-	percpu_ref_exit(&wb->refcnt);
 	spin_lock_irq(&cgwb_lock);
 	list_del(&wb->offline_node);
 	spin_unlock_irq(&cgwb_lock);
 
+	percpu_ref_exit(&wb->refcnt);
 	wb_exit(wb);
 	WARN_ON_ONCE(!list_empty(&wb->b_attached));
 	kfree_rcu(wb, rcu);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 924553aa8f78..dfc940d5221d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -5440,8 +5440,9 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			continue;
 		}
 
-		refs = min3(pages_per_huge_page(h) - pfn_offset,
-			    (vma->vm_end - vaddr) >> PAGE_SHIFT, remainder);
+		/* vaddr may not be aligned to PAGE_SIZE */
+		refs = min3(pages_per_huge_page(h) - pfn_offset, remainder,
+		    (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT);
 
 		if (pages || vmas)
 			record_subpages_vmas(mem_map_offset(page, pfn_offset),
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 98e3059bfea4..d739cdd1621a 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -9,6 +9,7 @@
 #ifdef CONFIG_KASAN_HW_TAGS
 
 #include <linux/static_key.h>
+#include "../slab.h"
 
 DECLARE_STATIC_KEY_FALSE(kasan_flag_stacktrace);
 extern bool kasan_flag_async __ro_after_init;
@@ -387,6 +388,17 @@ static inline void kasan_unpoison(const void *addr, size_t size, bool init)
 
 	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
 		return;
+	/*
+	 * Explicitly initialize the memory with the precise object size to
+	 * avoid overwriting the SLAB redzone. This disables initialization in
+	 * the arch code and may thus lead to performance penalty. The penalty
+	 * is accepted since SLAB redzones aren't enabled in production builds.
+	 */
+	if (__slub_debug_enabled() &&
+	    init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
+		init = false;
+		memzero_explicit((void *)addr, size);
+	}
 	size = round_up(size, KASAN_GRANULE_SIZE);
 
 	hw_set_mem_tag_range((void *)addr, size, tag, init);
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index d7666ace9d2e..575c685aa642 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -734,6 +734,22 @@ void kfence_shutdown_cache(struct kmem_cache *s)
 void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 {
 	/*
+	 * Perform size check before switching kfence_allocation_gate, so that
+	 * we don't disable KFENCE without making an allocation.
+	 */
+	if (size > PAGE_SIZE)
+		return NULL;
+
+	/*
+	 * Skip allocations from non-default zones, including DMA. We cannot
+	 * guarantee that pages in the KFENCE pool will have the requested
+	 * properties (e.g. reside in DMAable memory).
+	 */
+	if ((flags & GFP_ZONEMASK) ||
+	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32)))
+		return NULL;
+
+	/*
 	 * allocation_gate only needs to become non-zero, so it doesn't make
 	 * sense to continue writing to it and pay the associated contention
 	 * cost, in case we have a large number of concurrent allocations.
@@ -757,9 +773,6 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
 	if (!READ_ONCE(kfence_enabled))
 		return NULL;
 
-	if (size > PAGE_SIZE)
-		return NULL;
-
 	return kfence_guarded_alloc(s, size, flags);
 }
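The new GFP_ZONEMASK test above keys off the zone modifier bits packed into the low bits of gfp_t. A minimal userspace sketch of the same filtering logic, with the bit values mirrored from include/linux/gfp.h and a hypothetical kfence_alloc_allowed() helper standing in for the early-return checks:

#include <stdbool.h>
#include <stdio.h>

/* Zone modifier bits, mirrored from include/linux/gfp.h (5.13-era values). */
#define ___GFP_DMA	0x01u
#define ___GFP_HIGHMEM	0x02u
#define ___GFP_DMA32	0x04u
#define ___GFP_MOVABLE	0x08u
#define GFP_ZONEMASK	(___GFP_DMA | ___GFP_HIGHMEM | ___GFP_DMA32 | ___GFP_MOVABLE)

/* Hypothetical helper: reject oversized and zone-constrained requests the
 * same way the patched __kfence_alloc() does before touching the gate. */
static bool kfence_alloc_allowed(unsigned int gfp, unsigned long size,
				 unsigned long page_size)
{
	if (size > page_size)
		return false;	/* guarded objects never exceed one page */
	if (gfp & GFP_ZONEMASK)
		return false;	/* pool pages can't honour zone constraints */
	return true;
}

int main(void)
{
	printf("%d\n", kfence_alloc_allowed(0, 64, 4096));          /* 1 */
	printf("%d\n", kfence_alloc_allowed(___GFP_DMA, 64, 4096)); /* 0 */
	return 0;
}

Rejecting such requests up front matters because a KFENCE pool page is carved out of the default zone and cannot satisfy a __GFP_DMA-style placement constraint.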
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 7f24b9bcb2ec..942cbc16ad26 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -852,7 +852,7 @@ static void kfence_test_exit(void)
 	tracepoint_synchronize_unregister();
 }
 
-late_initcall(kfence_test_init);
+late_initcall_sync(kfence_test_init);
 module_exit(kfence_test_exit);
 
 MODULE_LICENSE("GPL v2");
diff --git a/mm/memblock.c b/mm/memblock.c
index 0041ff62c584..de7b553baa50 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -947,7 +947,8 @@ static bool should_skip_region(struct memblock_type *type,
 		return true;
 
 	/* skip hotpluggable memory regions if needed */
-	if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
+	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
+	    !(flags & MEMBLOCK_HOTPLUG))
 		return true;
 
 	/* if we want mirror memory skip non-mirror memory regions */
diff --git a/mm/memory.c b/mm/memory.c
index 747a01d495f2..25fc46e87214 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4026,8 +4026,17 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 				return ret;
 		}
 
-		if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
+		if (vmf->prealloc_pte) {
+			vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+			if (likely(pmd_none(*vmf->pmd))) {
+				mm_inc_nr_ptes(vma->vm_mm);
+				pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
+				vmf->prealloc_pte = NULL;
+			}
+			spin_unlock(vmf->ptl);
+		} else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
 			return VM_FAULT_OOM;
+		}
 	}
 
 	/* See comment in handle_pte_fault() */
diff --git a/mm/migrate.c b/mm/migrate.c
index 23cbd9de030b..34a9ad3e0a4f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -537,54 +537,6 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 }
 
 /*
- * Gigantic pages are so large that we do not guarantee that page++ pointer
- * arithmetic will work across the entire page.  We need something more
- * specialized.
- */
-static void __copy_gigantic_page(struct page *dst, struct page *src,
-				int nr_pages)
-{
-	int i;
-	struct page *dst_base = dst;
-	struct page *src_base = src;
-
-	for (i = 0; i < nr_pages; ) {
-		cond_resched();
-		copy_highpage(dst, src);
-
-		i++;
-		dst = mem_map_next(dst, dst_base, i);
-		src = mem_map_next(src, src_base, i);
-	}
-}
-
-void copy_huge_page(struct page *dst, struct page *src)
-{
-	int i;
-	int nr_pages;
-
-	if (PageHuge(src)) {
-		/* hugetlbfs page */
-		struct hstate *h = page_hstate(src);
-		nr_pages = pages_per_huge_page(h);
-
-		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
-			__copy_gigantic_page(dst, src, nr_pages);
-			return;
-		}
-	} else {
-		/* thp page */
-		BUG_ON(!PageTransHuge(src));
-		nr_pages = thp_nr_pages(src);
-	}
-
-	for (i = 0; i < nr_pages; i++) {
-		cond_resched();
-		copy_highpage(dst + i, src + i);
-	}
-}
-
-/*
  * Copy the page to its new location
  */
 void migrate_page_states(struct page *newpage, struct page *page)
diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c
index f5852a058ce0..1854850b4b89 100644
--- a/mm/mmap_lock.c
+++ b/mm/mmap_lock.c
@@ -156,14 +156,14 @@ static inline void put_memcg_path_buf(void)
 #define TRACE_MMAP_LOCK_EVENT(type, mm, ...)                                   \
 	do {                                                                   \
 		const char *memcg_path;                                        \
-		preempt_disable();                                             \
+		local_lock(&memcg_paths.lock);                                 \
 		memcg_path = get_mm_memcg_path(mm);                            \
 		trace_mmap_lock_##type(mm,                                     \
 				       memcg_path != NULL ? memcg_path : "",   \
 				       ##__VA_ARGS__);                         \
 		if (likely(memcg_path != NULL))                                \
 			put_memcg_path_buf();                                  \
-		preempt_enable();                                              \
+		local_unlock(&memcg_paths.lock);                               \
 	} while (0)
 
 #else /* !CONFIG_MEMCG */
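Replacing preempt_disable() with local_lock() keeps the tracepoint path preemptible on PREEMPT_RT while still serializing use of the per-CPU path buffers. As a loose userspace analogue only — a single pthread mutex stands in for the per-CPU local_lock_t, and the memcg_paths structure and trace_event() name are invented for illustration:

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the per-CPU 'memcg_paths' structure: one buffer plus the
 * lock the patch now takes instead of disabling preemption. */
static struct {
	pthread_mutex_t lock;
	char buf[256];
} memcg_paths = { .lock = PTHREAD_MUTEX_INITIALIZER };

static void trace_event(const char *path)
{
	/* local_lock(&memcg_paths.lock) analogue: the buffer is protected
	 * by a sleepable lock, not by a preemption-off section. */
	pthread_mutex_lock(&memcg_paths.lock);
	snprintf(memcg_paths.buf, sizeof(memcg_paths.buf), "%s", path);
	printf("mmap_lock event: memcg=%s\n", memcg_paths.buf);
	pthread_mutex_unlock(&memcg_paths.lock);
}

int main(void)
{
	trace_event("/sys/fs/cgroup/test");
	return 0;
}

On non-RT kernels local_lock() still compiles down to a preemption-off section, so the fast path is unchanged; the difference only shows up where spinning with preemption disabled would be illegal.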
memcg_path : "", \ ##__VA_ARGS__); \ if (likely(memcg_path != NULL)) \ put_memcg_path_buf(); \ - preempt_enable(); \ + local_unlock(&memcg_paths.lock); \ } while (0) #else /* !CONFIG_MEMCG */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 3b97e17806be..856b175c15a4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -840,21 +840,24 @@ void init_mem_debugging_and_hardening(void) } #endif - if (_init_on_alloc_enabled_early) { - if (page_poisoning_requested) - pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " - "will take precedence over init_on_alloc\n"); - else - static_branch_enable(&init_on_alloc); - } - if (_init_on_free_enabled_early) { - if (page_poisoning_requested) - pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " - "will take precedence over init_on_free\n"); - else - static_branch_enable(&init_on_free); + if ((_init_on_alloc_enabled_early || _init_on_free_enabled_early) && + page_poisoning_requested) { + pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, " + "will take precedence over init_on_alloc and init_on_free\n"); + _init_on_alloc_enabled_early = false; + _init_on_free_enabled_early = false; } + if (_init_on_alloc_enabled_early) + static_branch_enable(&init_on_alloc); + else + static_branch_disable(&init_on_alloc); + + if (_init_on_free_enabled_early) + static_branch_enable(&init_on_free); + else + static_branch_disable(&init_on_free); + #ifdef CONFIG_DEBUG_PAGEALLOC if (!debug_pagealloc_enabled()) return; @@ -3820,7 +3823,7 @@ static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) #endif /* CONFIG_FAIL_PAGE_ALLOC */ -static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) +noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) { return __should_fail_alloc_page(gfp_mask, order); } @@ -5221,9 +5224,6 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, unsigned int alloc_flags = ALLOC_WMARK_LOW; int nr_populated = 0, nr_account = 0; - if (unlikely(nr_pages <= 0)) - return 0; - /* * Skip populated array elements to determine if any pages need * to be allocated before disabling IRQs. @@ -5231,19 +5231,35 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid, while (page_array && nr_populated < nr_pages && page_array[nr_populated]) nr_populated++; + /* No pages requested? */ + if (unlikely(nr_pages <= 0)) + goto out; + /* Already populated array? */ if (unlikely(page_array && nr_pages - nr_populated == 0)) - return nr_populated; + goto out; /* Use the single page allocator for one page. */ if (nr_pages - nr_populated == 1) goto failed; +#ifdef CONFIG_PAGE_OWNER + /* + * PAGE_OWNER may recurse into the allocator to allocate space to + * save the stack with pagesets.lock held. Releasing/reacquiring + * removes much of the performance benefit of bulk allocation so + * force the caller to allocate one page at a time as it'll have + * similar performance to added complexity to the bulk allocator. + */ + if (static_branch_unlikely(&page_owner_inited)) + goto failed; +#endif + /* May set ALLOC_NOFRAGMENT, fragmentation will return 1 page. */ gfp &= gfp_allowed_mask; alloc_gfp = gfp; if (!prepare_alloc_pages(gfp, 0, preferred_nid, nodemask, &ac, &alloc_gfp, &alloc_flags)) - return 0; + goto out; gfp = alloc_gfp; /* Find an allowed local zone that meets the low watermark. 
diff --git a/mm/rmap.c b/mm/rmap.c
index 795f9d5f8386..b9eb5c12f3fe 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1440,21 +1440,20 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		/*
 		 * If the page is mlock()d, we cannot swap it out.
 		 */
-		if (!(flags & TTU_IGNORE_MLOCK)) {
-			if (vma->vm_flags & VM_LOCKED) {
-				/* PTE-mapped THP are never marked as mlocked */
-				if (!PageTransCompound(page) ||
-				    (PageHead(page) && !PageDoubleMap(page))) {
-					/*
-					 * Holding pte lock, we do *not* need
-					 * mmap_lock here
-					 */
-					mlock_vma_page(page);
-				}
-				ret = false;
-				page_vma_mapped_walk_done(&pvmw);
-				break;
-			}
+		if (!(flags & TTU_IGNORE_MLOCK) &&
+		    (vma->vm_flags & VM_LOCKED)) {
+			/*
+			 * PTE-mapped THP are never marked as mlocked: so do
+			 * not set it on a DoubleMap THP, nor on an Anon THP
+			 * (which may still be PTE-mapped after DoubleMap was
+			 * cleared).  But stop unmapping even in those cases.
+			 */
+			if (!PageTransCompound(page) || (PageHead(page) &&
+			     !PageDoubleMap(page) && !PageAnon(page)))
+				mlock_vma_page(page);
+			page_vma_mapped_walk_done(&pvmw);
+			ret = false;
+			break;
 		}
 
 		/* Unexpected PMD-mapped THP? */
@@ -1986,8 +1985,10 @@ static bool page_mlock_one(struct page *page, struct vm_area_struct *vma,
 		 */
 		if (vma->vm_flags & VM_LOCKED) {
 			/*
-			 * PTE-mapped THP are never marked as mlocked, but
-			 * this function is never called when PageDoubleMap().
+			 * PTE-mapped THP are never marked as mlocked; but
+			 * this function is never called on a DoubleMap THP,
+			 * nor on an Anon THP (which may still be PTE-mapped
+			 * after DoubleMap was cleared).
 			 */
 			mlock_vma_page(page);
 			/*
@@ -2022,6 +2023,10 @@ void page_mlock(struct page *page)
 	VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page);
 	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
 
+	/* Anon THP are only marked as mlocked when singly mapped */
+	if (PageTransCompound(page) && PageAnon(page))
+		return;
+
 	rmap_walk(page, &rwc);
 }
diff --git a/mm/secretmem.c b/mm/secretmem.c
index f77d25467a14..030f02ddc7c1 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -152,6 +152,7 @@ static void secretmem_freepage(struct page *page)
 }
 
 const struct address_space_operations secretmem_aops = {
+	.set_page_dirty	= __set_page_dirty_no_writeback,
 	.freepage	= secretmem_freepage,
 	.migratepage	= secretmem_migratepage,
 	.isolate_page	= secretmem_isolate_page,
diff --git a/mm/slab.h b/mm/slab.h
index 67e06637ff2e..f997fd5e42c8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -216,10 +216,18 @@ DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
 #endif
 extern void print_tracking(struct kmem_cache *s, void *object);
 long validate_slab_cache(struct kmem_cache *s);
+static inline bool __slub_debug_enabled(void)
+{
+	return static_branch_unlikely(&slub_debug_enabled);
+}
 #else
 static inline void print_tracking(struct kmem_cache *s, void *object)
 {
 }
+static inline bool __slub_debug_enabled(void)
+{
+	return false;
+}
 #endif
 
 /*
@@ -229,11 +237,10 @@ static inline void print_tracking(struct kmem_cache *s, void *object)
  */
 static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
 {
-#ifdef CONFIG_SLUB_DEBUG
-	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
-	if (static_branch_unlikely(&slub_debug_enabled))
+	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
+		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
+	if (__slub_debug_enabled())
 		return s->flags & flags;
-#endif
 	return false;
 }
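The kmem_cache_debug_flags() rewrite relies on a standard kernel idiom: when CONFIG_SLUB_DEBUG is off, __slub_debug_enabled() is a constant-false stub, so the compiler discards the whole branch and the caller needs no #ifdef. A condensed userspace illustration of the pattern, with the config switch simulated by a plain macro and a bool standing in for the static key:

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_SLUB_DEBUG 1	/* flip to 0 to compile the stub path */

#if CONFIG_SLUB_DEBUG
static bool debug_enabled = true;	/* stands in for the static key */
static inline bool __slub_debug_enabled(void) { return debug_enabled; }
#else
static inline bool __slub_debug_enabled(void) { return false; }
#endif

/* No #ifdef needed here: with CONFIG_SLUB_DEBUG=0, __slub_debug_enabled()
 * is a compile-time false and the flags test becomes dead code. */
static inline bool kmem_cache_debug_flags(unsigned int cache_flags,
					  unsigned int flags)
{
	if (__slub_debug_enabled())
		return cache_flags & flags;
	return false;
}

int main(void)
{
	printf("%d\n", kmem_cache_debug_flags(0x4, 0x4)); /* 1 when debug on */
	return 0;
}

Moving the helper into mm/slab.h is what lets the KASAN change above call it from kasan_unpoison() without duplicating the static-key plumbing.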
diff --git a/mm/slub.c b/mm/slub.c
index dc863c1ea324..090fa14628f9 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -26,7 +26,6 @@
 #include <linux/cpuset.h>
 #include <linux/mempolicy.h>
 #include <linux/ctype.h>
-#include <linux/stackdepot.h>
 #include <linux/debugobjects.h>
 #include <linux/kallsyms.h>
 #include <linux/kfence.h>
@@ -120,25 +119,11 @@
  */
 
 #ifdef CONFIG_SLUB_DEBUG
-
 #ifdef CONFIG_SLUB_DEBUG_ON
 DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
 #else
 DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
 #endif
-
-static inline bool __slub_debug_enabled(void)
-{
-	return static_branch_unlikely(&slub_debug_enabled);
-}
-
-#else		/* CONFIG_SLUB_DEBUG */
-
-static inline bool __slub_debug_enabled(void)
-{
-	return false;
-}
-
 #endif		/* CONFIG_SLUB_DEBUG */
 
 static inline bool kmem_cache_debug(struct kmem_cache *s)
@@ -221,8 +206,8 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #define TRACK_ADDRS_COUNT 16
 struct track {
 	unsigned long addr;	/* Called from address */
-#ifdef CONFIG_STACKDEPOT
-	depot_stack_handle_t handle;
+#ifdef CONFIG_STACKTRACE
+	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
 #endif
 	int cpu;	/* Was running on cpu */
 	int pid;	/* Pid context */
@@ -626,27 +611,22 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 	return kasan_reset_tag(p + alloc);
 }
 
-#ifdef CONFIG_STACKDEPOT
-static depot_stack_handle_t save_stack_depot_trace(gfp_t flags)
-{
-	unsigned long entries[TRACK_ADDRS_COUNT];
-	depot_stack_handle_t handle;
-	unsigned int nr_entries;
-
-	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 4);
-	handle = stack_depot_save(entries, nr_entries, flags);
-	return handle;
-}
-#endif
-
 static void set_track(struct kmem_cache *s, void *object,
 		      enum track_item alloc, unsigned long addr)
 {
 	struct track *p = get_track(s, object, alloc);
 
 	if (addr) {
-#ifdef CONFIG_STACKDEPOT
-		p->handle = save_stack_depot_trace(GFP_NOWAIT);
+#ifdef CONFIG_STACKTRACE
+		unsigned int nr_entries;
+
+		metadata_access_enable();
+		nr_entries = stack_trace_save(kasan_reset_tag(p->addrs),
+					      TRACK_ADDRS_COUNT, 3);
+		metadata_access_disable();
+
+		if (nr_entries < TRACK_ADDRS_COUNT)
+			p->addrs[nr_entries] = 0;
 #endif
 		p->addr = addr;
 		p->cpu = smp_processor_id();
@@ -673,19 +653,14 @@ static void print_track(const char *s, struct track *t, unsigned long pr_time)
 	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
 	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
-#ifdef CONFIG_STACKDEPOT
+#ifdef CONFIG_STACKTRACE
 	{
-		depot_stack_handle_t handle;
-		unsigned long *entries;
-		unsigned int nr_entries;
-
-		handle = READ_ONCE(t->handle);
-		if (!handle) {
-			pr_err("object allocation/free stack trace missing\n");
-		} else {
-			nr_entries = stack_depot_fetch(handle, &entries);
-			stack_trace_print(entries, nr_entries, 0);
-		}
+		int i;
+		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
+			if (t->addrs[i])
+				pr_err("\t%pS\n", (void *)t->addrs[i]);
+			else
+				break;
 	}
 #endif
 }
@@ -4059,26 +4034,18 @@ void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct page *page)
 	objp = fixup_red_left(s, objp);
 	trackp = get_track(s, objp, TRACK_ALLOC);
 	kpp->kp_ret = (void *)trackp->addr;
-#ifdef CONFIG_STACKDEPOT
-	{
-		depot_stack_handle_t handle;
-		unsigned long *entries;
-		unsigned int nr_entries;
-
-		handle = READ_ONCE(trackp->handle);
-		if (handle) {
-			nr_entries = stack_depot_fetch(handle, &entries);
-			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
-				kpp->kp_stack[i] = (void *)entries[i];
-		}
+#ifdef CONFIG_STACKTRACE
+	for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
+		kpp->kp_stack[i] = (void *)trackp->addrs[i];
+		if (!kpp->kp_stack[i])
+			break;
+	}
 
-		trackp = get_track(s, objp, TRACK_FREE);
-		handle = READ_ONCE(trackp->handle);
-		if (handle) {
-			nr_entries = stack_depot_fetch(handle, &entries);
-			for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
-				kpp->kp_free_stack[i] = (void *)entries[i];
-		}
+	trackp = get_track(s, objp, TRACK_FREE);
+	for (i = 0; i < KS_ADDRS_COUNT && i < TRACK_ADDRS_COUNT; i++) {
+		kpp->kp_free_stack[i] = (void *)trackp->addrs[i];
+		if (!kpp->kp_free_stack[i])
+			break;
 	}
 #endif
 #endif
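With stackdepot reverted, each track record again stores raw return addresses in a fixed TRACK_ADDRS_COUNT array that is zero-terminated when the saved trace is shorter. A userspace sketch of that storage scheme, with fake_stack_save() fabricating a call chain in place of stack_trace_save() and the printing loop mirroring print_track():

#include <stdio.h>

#define TRACK_ADDRS_COUNT 16

struct track {
	unsigned long addr;
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* raw call-chain slots */
};

/* Stand-in for stack_trace_save(): fill up to 'max' entries, return count. */
static unsigned int fake_stack_save(unsigned long *store, unsigned int max)
{
	unsigned int i, n = 5;	/* pretend the trace has 5 frames */

	for (i = 0; i < n && i < max; i++)
		store[i] = 0x1000 + i;
	return i;
}

static void set_track(struct track *t)
{
	unsigned int nr = fake_stack_save(t->addrs, TRACK_ADDRS_COUNT);

	if (nr < TRACK_ADDRS_COUNT)
		t->addrs[nr] = 0;	/* sentinel: printing stops here */
}

static void print_track(const struct track *t)
{
	int i;

	for (i = 0; i < TRACK_ADDRS_COUNT && t->addrs[i]; i++)
		printf("\t%#lx\n", t->addrs[i]);
}

int main(void)
{
	struct track t = { 0 };

	set_track(&t);
	print_track(&t);
	return 0;
}

The trade-off the revert accepts is larger per-object debug metadata (16 slots inline per track) in exchange for not calling into the depot allocator from the allocation path itself.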
diff --git a/mm/util.c b/mm/util.c
index 99c6cc77de9e..9043d03750a7 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -731,6 +731,16 @@ int __page_mapcount(struct page *page)
 }
 EXPORT_SYMBOL_GPL(__page_mapcount);
 
+void copy_huge_page(struct page *dst, struct page *src)
+{
+	unsigned i, nr = compound_nr(src);
+
+	for (i = 0; i < nr; i++) {
+		cond_resched();
+		copy_highpage(nth_page(dst, i), nth_page(src, i));
+	}
+}
+
 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
 int sysctl_overcommit_ratio __read_mostly = 50;
 unsigned long sysctl_overcommit_kbytes __read_mostly;
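The single loop in mm/util.c can replace both removed mm/migrate.c helpers because nth_page() is safe across struct page arrays that are not virtually contiguous (SPARSEMEM without VMEMMAP), where the old dst + i pointer arithmetic was not. A userspace model of the chunked copy, with copy_chunk() and CHUNK as invented stand-ins for copy_highpage() and PAGE_SIZE:

#include <stdio.h>
#include <string.h>

#define CHUNK 4096	/* stand-in for PAGE_SIZE */

static void copy_chunk(char *dst, const char *src)
{
	memcpy(dst, src, CHUNK);	/* stands in for copy_highpage() */
}

/* Model of the new copy_huge_page(): one loop covers every compound page
 * size, yielding between chunks in the kernel via cond_resched() (omitted
 * here, since userspace is always preemptible). */
static void copy_huge(char *dst, const char *src, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		copy_chunk(dst + (size_t)i * CHUNK, src + (size_t)i * CHUNK);
}

int main(void)
{
	static char src[8 * CHUNK] = "hello", dst[8 * CHUNK];

	copy_huge(dst, src, 8);
	printf("%s\n", dst);	/* hello */
	return 0;
}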