author    | akpm <akpm@linux-foundation.org> | 2022-06-27 10:31:34 -0700
committer | akpm <akpm@linux-foundation.org> | 2022-06-27 10:31:34 -0700
commit    | 46a3b1125308f8f90a065eeecfafd2a96b01a36c (patch)
tree      | 6e080118fbad5aa217d160cedf02f10108ce3bf2 /mm
parent    | 6edda04ccc7cfb281d139e352dbd5dd933bd2751 (diff)
parent    | 03c765b0e3b4cb5063276b086c76f7a612856a9a (diff)
Merge branch 'master' into mm-stable
Diffstat (limited to 'mm')
-rw-r--r-- | mm/backing-dev.c     | 11
-rw-r--r-- | mm/damon/reclaim.c   |  8
-rw-r--r-- | mm/filemap.c         | 15
-rw-r--r-- | mm/huge_memory.c     |  1
-rw-r--r-- | mm/hwpoison-inject.c |  2
-rw-r--r-- | mm/kfence/core.c     |  7
-rw-r--r-- | mm/madvise.c         |  2
-rw-r--r-- | mm/memcontrol.c      |  2
-rw-r--r-- | mm/memory-failure.c  | 12
-rw-r--r-- | mm/migrate.c         |  1
-rw-r--r-- | mm/page_isolation.c  |  2
-rw-r--r-- | mm/readahead.c       |  2
-rw-r--r-- | mm/slub.c            | 43
-rw-r--r-- | mm/swap.c            |  2
-rw-r--r-- | mm/usercopy.c        | 26
-rw-r--r-- | mm/vmalloc.c         |  2
16 files changed, 98 insertions, 40 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index ff60bd7d74e0..95550b8fa7fe 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -231,20 +231,13 @@ static __init int bdi_class_init(void)
 }
 postcore_initcall(bdi_class_init);
 
-static int bdi_init(struct backing_dev_info *bdi);
-
 static int __init default_bdi_init(void)
 {
-	int err;
-
 	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_UNBOUND |
 				 WQ_SYSFS, 0);
 	if (!bdi_wq)
 		return -ENOMEM;
-
-	err = bdi_init(&noop_backing_dev_info);
-
-	return err;
+	return 0;
 }
 subsys_initcall(default_bdi_init);
 
@@ -781,7 +774,7 @@ static void cgwb_remove_from_bdi_list(struct bdi_writeback *wb)
 
 #endif	/* CONFIG_CGROUP_WRITEBACK */
 
-static int bdi_init(struct backing_dev_info *bdi)
+int bdi_init(struct backing_dev_info *bdi)
 {
 	int ret;
diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c
index 8efbfb24f3a1..4b07c29effe9 100644
--- a/mm/damon/reclaim.c
+++ b/mm/damon/reclaim.c
@@ -374,6 +374,8 @@ static void damon_reclaim_timer_fn(struct work_struct *work)
 }
 static DECLARE_DELAYED_WORK(damon_reclaim_timer, damon_reclaim_timer_fn);
 
+static bool damon_reclaim_initialized;
+
 static int enabled_store(const char *val,
 		const struct kernel_param *kp)
 {
@@ -382,6 +384,10 @@ static int enabled_store(const char *val,
 	if (rc < 0)
 		return rc;
 
+	/* system_wq might not initialized yet */
+	if (!damon_reclaim_initialized)
+		return rc;
+
 	if (enabled)
 		schedule_delayed_work(&damon_reclaim_timer, 0);
 
@@ -449,6 +455,8 @@ static int __init damon_reclaim_init(void)
 	damon_add_target(ctx, target);
 
 	schedule_delayed_work(&damon_reclaim_timer, 0);
+
+	damon_reclaim_initialized = true;
 	return 0;
 }
diff --git a/mm/filemap.c b/mm/filemap.c
index ac3775c1ce4c..ffdfbc8b0e3c 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2385,6 +2385,8 @@ static void filemap_get_read_batch(struct address_space *mapping,
 			continue;
 		if (xas.xa_index > max || xa_is_value(folio))
 			break;
+		if (xa_is_sibling(folio))
+			break;
 		if (!folio_try_get_rcu(folio))
 			goto retry;
 
@@ -2629,6 +2631,13 @@ err:
 	return err;
 }
 
+static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)
+{
+	unsigned int shift = folio_shift(folio);
+
+	return (pos1 >> shift == pos2 >> shift);
+}
+
 /**
  * filemap_read - Read data from the page cache.
  * @iocb: The iocb to read.
@@ -2700,11 +2709,11 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,
 		writably_mapped = mapping_writably_mapped(mapping);
 
 		/*
-		 * When a sequential read accesses a page several times, only
+		 * When a read accesses the same folio several times, only
 		 * mark it as accessed the first time.
 		 */
-		if (iocb->ki_pos >> PAGE_SHIFT !=
-		    ra->prev_pos >> PAGE_SHIFT)
+		if (!pos_same_folio(iocb->ki_pos, ra->prev_pos - 1,
+				    fbatch.folios[0]))
 			folio_mark_accessed(fbatch.folios[0]);
 
 		for (i = 0; i < folio_batch_count(&fbatch); i++) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f7248002dad9..834f288b3769 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2377,6 +2377,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
 			page_tail);
 	page_tail->mapping = head->mapping;
 	page_tail->index = head->index + tail;
+	page_tail->private = 0;
 
 	/* Page flags must be visible before we make the page non-compound. */
 	smp_wmb();
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 5c0cddd81505..65e242b5a432 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -48,7 +48,7 @@ static int hwpoison_inject(void *data, u64 val)
 
 inject:
 	pr_info("Injecting memory failure at pfn %#lx\n", pfn);
-	err = memory_failure(pfn, 0);
+	err = memory_failure(pfn, MF_SW_SIMULATED);
 	return (err == -EOPNOTSUPP) ? 0 : err;
 }
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 4e7cd4c8e687..4b5e5a3d3a63 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -360,6 +360,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	unsigned long flags;
 	struct slab *slab;
 	void *addr;
+	const bool random_right_allocate = prandom_u32_max(2);
+	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
+				  !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS);
 
 	/* Try to obtain a free object. */
 	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
@@ -404,7 +407,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	 * is that the out-of-bounds accesses detected are deterministic for
 	 * such allocations.
	 */
-	if (prandom_u32_max(2)) {
+	if (random_right_allocate) {
 		/* Allocate on the "right" side, re-calculate address. */
 		meta->addr += PAGE_SIZE - size;
 		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
@@ -444,7 +447,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
 	if (cache->ctor)
 		cache->ctor(addr);
 
-	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
+	if (random_fault)
 		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
 
 	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
diff --git a/mm/madvise.c b/mm/madvise.c
index d7b4f2602949..0316bbc6441b 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1112,7 +1112,7 @@ static int madvise_inject_error(int behavior,
 		} else {
 			pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
				 pfn, start);
-			ret = memory_failure(pfn, MF_COUNT_INCREASED);
+			ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED);
 			if (ret == -EOPNOTSUPP)
 				ret = 0;
 		}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c1ae9b3f8d35..655c09393ad5 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4887,7 +4887,7 @@ static int mem_cgroup_slab_show(struct seq_file *m, void *p)
 {
 	/*
 	 * Deprecated.
-	 * Please, take a look at tools/cgroup/slabinfo.py .
+	 * Please, take a look at tools/cgroup/memcg_slabinfo.py .
 	 */
 	return 0;
 }
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b85661cbdc4a..da39ec8afca8 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -69,6 +69,8 @@ int sysctl_memory_failure_recovery __read_mostly = 1;
 
 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
+static bool hw_memory_failure __read_mostly = false;
+
 static bool __page_handle_poison(struct page *page)
 {
 	int ret;
@@ -1768,6 +1770,9 @@ int memory_failure(unsigned long pfn, int flags)
 
 	mutex_lock(&mf_mutex);
 
+	if (!(flags & MF_SW_SIMULATED))
+		hw_memory_failure = true;
+
 	p = pfn_to_online_page(pfn);
 	if (!p) {
 		res = arch_memory_failure(pfn, flags);
@@ -2103,6 +2108,13 @@ int unpoison_memory(unsigned long pfn)
 
 	mutex_lock(&mf_mutex);
 
+	if (hw_memory_failure) {
+		unpoison_pr_info("Unpoison: Disabled after HW memory failure %#lx\n",
+				 pfn, &unpoison_rs);
+		ret = -EOPNOTSUPP;
+		goto unlock_mutex;
+	}
+
 	if (!PageHWPoison(p)) {
 		unpoison_pr_info("Unpoison: Page was already unpoisoned %#lx\n",
 				 pfn, &unpoison_rs);
diff --git a/mm/migrate.c b/mm/migrate.c
index e51588e95f57..6c1ea61f39d8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1106,6 +1106,7 @@ static int unmap_and_move(new_page_t get_new_page,
 	if (!newpage)
 		return -ENOMEM;
 
+	newpage->private = 0;
 	rc = __unmap_and_move(page, newpage, force, mode);
 	if (rc == MIGRATEPAGE_SUCCESS)
 		set_page_owner_migrate_reason(newpage, reason);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index d200d41ad0d3..9d73dc38e3d7 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -286,6 +286,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
 * @flags: isolation flags
 * @gfp_flags: GFP flags used for migrating pages
 * @isolate_before: isolate the pageblock before the boundary_pfn
+ * @skip_isolation: the flag to skip the pageblock isolation in second
+ *	isolate_single_pageblock()
 *
 * Free and in-use pages can be as big as MAX_ORDER-1 and contain more than one
 * pageblock. When not all pageblocks within a page are isolated at the same
diff --git a/mm/readahead.c b/mm/readahead.c
index 57a015108254..fdcd28cbd92d 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -510,6 +510,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 		new_order--;
 	}
 
+	filemap_invalidate_lock_shared(mapping);
 	while (index <= limit) {
 		unsigned int order = new_order;
 
@@ -536,6 +537,7 @@ void page_cache_ra_order(struct readahead_control *ractl,
 	}
 
 	read_pages(ractl);
+	filemap_invalidate_unlock_shared(mapping);
 
 	/*
 	 * If there were already pages in the page cache, then we may have
diff --git a/mm/slub.c b/mm/slub.c
index e5535020e0fd..b1281b8654bd 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -726,25 +726,48 @@ static struct track *get_track(struct kmem_cache *s, void *object,
 	return kasan_reset_tag(p + alloc);
 }
 
-static void noinline set_track(struct kmem_cache *s, void *object,
-			enum track_item alloc, unsigned long addr)
-{
-	struct track *p = get_track(s, object, alloc);
-
 #ifdef CONFIG_STACKDEPOT
+static noinline depot_stack_handle_t set_track_prepare(void)
+{
+	depot_stack_handle_t handle;
 	unsigned long entries[TRACK_ADDRS_COUNT];
 	unsigned int nr_entries;
 
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
-	p->handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
+
+	return handle;
+}
+#else
+static inline depot_stack_handle_t set_track_prepare(void)
+{
+	return 0;
+}
 #endif
 
+static void set_track_update(struct kmem_cache *s, void *object,
+			     enum track_item alloc, unsigned long addr,
+			     depot_stack_handle_t handle)
+{
+	struct track *p = get_track(s, object, alloc);
+
+#ifdef CONFIG_STACKDEPOT
+	p->handle = handle;
+#endif
 	p->addr = addr;
 	p->cpu = smp_processor_id();
 	p->pid = current->pid;
 	p->when = jiffies;
 }
 
+static __always_inline void set_track(struct kmem_cache *s, void *object,
+				      enum track_item alloc, unsigned long addr)
+{
+	depot_stack_handle_t handle = set_track_prepare();
+
+	set_track_update(s, object, alloc, addr, handle);
+}
+
 static void init_tracking(struct kmem_cache *s, void *object)
 {
 	struct track *p;
@@ -1373,6 +1396,10 @@ static noinline int free_debug_processing(
 	int cnt = 0;
 	unsigned long flags, flags2;
 	int ret = 0;
+	depot_stack_handle_t handle = 0;
+
+	if (s->flags & SLAB_STORE_USER)
+		handle = set_track_prepare();
 
 	spin_lock_irqsave(&n->list_lock, flags);
 	slab_lock(slab, &flags2);
@@ -1391,7 +1418,7 @@ next_object:
 	}
 
 	if (s->flags & SLAB_STORE_USER)
-		set_track(s, object, TRACK_FREE, addr);
+		set_track_update(s, object, TRACK_FREE, addr, handle);
 	trace(s, slab, object, 0);
 	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
 	init_object(s, object, SLUB_RED_INACTIVE);
@@ -2936,6 +2963,7 @@ redo:
 
 	if (!freelist) {
 		c->slab = NULL;
+		c->tid = next_tid(c->tid);
 		local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 		stat(s, DEACTIVATE_BYPASS);
 		goto new_slab;
@@ -2968,6 +2996,7 @@ deactivate_slab:
 	freelist = c->freelist;
 	c->slab = NULL;
 	c->freelist = NULL;
+	c->tid = next_tid(c->tid);
 	local_unlock_irqrestore(&s->cpu_slab->lock, flags);
 	deactivate_slab(s, slab, freelist);
diff --git a/mm/swap.c b/mm/swap.c
index f3922a96b2e9..034bb24879a3 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -881,7 +881,7 @@ void lru_cache_disable(void)
 	 * lru_disable_count = 0 will have exited the critical
 	 * section when synchronize_rcu() returns.
 	 */
-	synchronize_rcu();
+	synchronize_rcu_expedited();
 #ifdef CONFIG_SMP
 	__lru_add_drain_all(true);
 #else
diff --git a/mm/usercopy.c b/mm/usercopy.c
index baeacc735b83..4e1da708699b 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -161,29 +161,27 @@ static inline void check_bogus_address(const unsigned long ptr, unsigned long n,
 static inline void check_heap_object(const void *ptr, unsigned long n,
 				     bool to_user)
 {
+	uintptr_t addr = (uintptr_t)ptr;
+	unsigned long offset;
 	struct folio *folio;
 
 	if (is_kmap_addr(ptr)) {
-		unsigned long page_end = (unsigned long)ptr | (PAGE_SIZE - 1);
-
-		if ((unsigned long)ptr + n - 1 > page_end)
-			usercopy_abort("kmap", NULL, to_user,
-				       offset_in_page(ptr), n);
+		offset = offset_in_page(ptr);
+		if (n > PAGE_SIZE - offset)
+			usercopy_abort("kmap", NULL, to_user, offset, n);
 		return;
 	}
 
 	if (is_vmalloc_addr(ptr)) {
-		struct vm_struct *area = find_vm_area(ptr);
-		unsigned long offset;
+		struct vmap_area *area = find_vmap_area(addr);
 
-		if (!area) {
+		if (!area)
 			usercopy_abort("vmalloc", "no area", to_user, 0, n);
-			return;
-		}
 
-		offset = ptr - area->addr;
-		if (offset + n > get_vm_area_size(area))
+		if (n > area->va_end - addr) {
+			offset = addr - area->va_start;
 			usercopy_abort("vmalloc", NULL, to_user, offset, n);
+		}
 		return;
 	}
 
@@ -196,8 +194,8 @@ static inline void check_heap_object(const void *ptr, unsigned long n,
 		/* Check slab allocator for flags and size. */
 		__check_heap_object(ptr, n, folio_slab(folio), to_user);
 	} else if (folio_test_large(folio)) {
-		unsigned long offset = ptr - folio_address(folio);
-		if (offset + n > folio_size(folio))
+		offset = ptr - folio_address(folio);
+		if (n > folio_size(folio) - offset)
 			usercopy_abort("page alloc", NULL, to_user, offset, n);
 	}
 }
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fff3925c6f8f..5977b178694d 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1791,7 +1791,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 	free_vmap_area_noflush(va);
 }
 
-static struct vmap_area *find_vmap_area(unsigned long addr)
+struct vmap_area *find_vmap_area(unsigned long addr)
 {
 	struct vmap_area *va;