author | Linus Torvalds <torvalds@linux-foundation.org> | 2023-05-04 13:09:43 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-05-04 13:09:43 -0700
commit | 15fb96a35db7aad8eb7cf98206b10e50a966e388
tree | eac3fa17f07c62ca557aa0ffa56e2c3ee5d1dbc6 /mm
parent | 671e148d079f4d4eca0a98f7dadf1fe69d856374
parent | 245f0922689364b21163af4937a05ea0ba576fae
Merge tag 'mm-stable-2023-05-03-16-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull more MM updates from Andrew Morton:
- Some DAMON cleanups from Kefeng Wang
- Some KSM work from David Hildenbrand, to make the PR_SET_MEMORY_MERGE
  prctl's behavior more similar to KSM's existing MADV_UNMERGEABLE behavior
  when merging is turned off again (see the usage sketch below).
[ Andrew called these "final", but I suspect we'll have a series fixing
up the fact that the last commit in the dmapools series in the
previous pull seems to have unintentionally just reverted all the
other commits in the same series.. - Linus ]
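To illustrate the interface touched by the KSM work above: PR_SET_MEMORY_MERGE is a prctl option, and with this pull clearing it also unmerges already-merged pages. The sketch below is illustrative only and not part of the pull; the fallback #defines are an assumption for systems whose <sys/prctl.h> predates the feature (the values match the 6.4 uapi headers), and the call may require CAP_SYS_RESOURCE on kernels of this vintage.

```c
#include <stdio.h>
#include <sys/prctl.h>

/* Fallback values from the 6.4 uapi headers (assumption: verify locally). */
#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE     67
#define PR_GET_MEMORY_MERGE     68
#endif

int main(void)
{
        /* Opt the whole process into KSM, roughly like MADV_MERGEABLE on
         * every compatible VMA, including ones mapped later. */
        if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
                perror("PR_SET_MEMORY_MERGE=1");

        /* ... workload runs; KSM may merge identical anonymous pages ... */

        /* With the unmerge work in this pull, clearing the flag also unmerges
         * pages and drops VM_MERGEABLE, mirroring MADV_UNMERGEABLE. */
        if (prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0))
                perror("PR_SET_MEMORY_MERGE=0");

        return 0;
}
```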
* tag 'mm-stable-2023-05-03-16-22' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
mm: hwpoison: coredump: support recovery from dump_user_range()
mm/page_alloc: add some comments to explain the possible hole in __pageblock_pfn_to_page()
mm/ksm: move disabling KSM from s390/gmap code to KSM code
selftests/ksm: ksm_functional_tests: add prctl unmerge test
mm/ksm: unmerge and clear VM_MERGEABLE when setting PR_SET_MEMORY_MERGE=0
mm/damon/paddr: fix missing folio_sz update in damon_pa_young()
mm/damon/paddr: minor refactor of damon_pa_mark_accessed_or_deactivate()
mm/damon/paddr: minor refactor of damon_pa_pageout()
Diffstat (limited to 'mm')
-rw-r--r-- | mm/damon/paddr.c | 26
-rw-r--r-- | mm/ksm.c | 70
-rw-r--r-- | mm/page_alloc.c | 9
3 files changed, 89 insertions, 16 deletions
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index dd9c33fbe805..467b99166b43 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -134,10 +134,8 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
         }
 
         need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
-        if (need_lock && !folio_trylock(folio)) {
-                folio_put(folio);
-                return false;
-        }
+        if (need_lock && !folio_trylock(folio))
+                goto out;
 
         rmap_walk(folio, &rwc);
 
@@ -238,21 +236,18 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
                 if (!folio)
                         continue;
 
-                if (damos_pa_filter_out(s, folio)) {
-                        folio_put(folio);
-                        continue;
-                }
+                if (damos_pa_filter_out(s, folio))
+                        goto put_folio;
 
                 folio_clear_referenced(folio);
                 folio_test_clear_young(folio);
-                if (!folio_isolate_lru(folio)) {
-                        folio_put(folio);
-                        continue;
-                }
+                if (!folio_isolate_lru(folio))
+                        goto put_folio;
                 if (folio_test_unevictable(folio))
                         folio_putback_lru(folio);
                 else
                         list_add(&folio->lru, &folio_list);
+put_folio:
                 folio_put(folio);
         }
         applied = reclaim_pages(&folio_list);
@@ -271,16 +266,15 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
                 if (!folio)
                         continue;
 
-                if (damos_pa_filter_out(s, folio)) {
-                        folio_put(folio);
-                        continue;
-                }
+                if (damos_pa_filter_out(s, folio))
+                        goto put_folio;
 
                 if (mark_accessed)
                         folio_mark_accessed(folio);
                 else
                         folio_deactivate(folio);
                 applied += folio_nr_pages(folio);
+put_folio:
                 folio_put(folio);
         }
         return applied * PAGE_SIZE;
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2520,6 +2520,22 @@ static void __ksm_add_vma(struct vm_area_struct *vma)
                 vm_flags_set(vma, VM_MERGEABLE);
 }
 
+static int __ksm_del_vma(struct vm_area_struct *vma)
+{
+        int err;
+
+        if (!(vma->vm_flags & VM_MERGEABLE))
+                return 0;
+
+        if (vma->anon_vma) {
+                err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end);
+                if (err)
+                        return err;
+        }
+
+        vm_flags_clear(vma, VM_MERGEABLE);
+        return 0;
+}
 /**
  * ksm_add_vma - Mark vma as mergeable if compatible
  *
@@ -2542,6 +2558,20 @@ static void ksm_add_vmas(struct mm_struct *mm)
                 __ksm_add_vma(vma);
 }
 
+static int ksm_del_vmas(struct mm_struct *mm)
+{
+        struct vm_area_struct *vma;
+        int err;
+
+        VMA_ITERATOR(vmi, mm, 0);
+        for_each_vma(vmi, vma) {
+                err = __ksm_del_vma(vma);
+                if (err)
+                        return err;
+        }
+        return 0;
+}
+
 /**
  * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all
  *                        compatible VMA's
@@ -2569,6 +2599,46 @@ int ksm_enable_merge_any(struct mm_struct *mm)
         return 0;
 }
 
+/**
+ * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm,
+ *                         previously enabled via ksm_enable_merge_any().
+ *
+ * Disabling merging implies unmerging any merged pages, like setting
+ * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and
+ * merging on all compatible VMA's remains enabled.
+ *
+ * @mm: Pointer to mm
+ *
+ * Returns 0 on success, otherwise error code
+ */
+int ksm_disable_merge_any(struct mm_struct *mm)
+{
+        int err;
+
+        if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+                return 0;
+
+        err = ksm_del_vmas(mm);
+        if (err) {
+                ksm_add_vmas(mm);
+                return err;
+        }
+
+        clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
+        return 0;
+}
+
+int ksm_disable(struct mm_struct *mm)
+{
+        mmap_assert_write_locked(mm);
+
+        if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
+                return 0;
+        if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+                return ksm_disable_merge_any(mm);
+        return ksm_del_vmas(mm);
+}
+
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
                 unsigned long end, int advice, unsigned long *vm_flags)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9de2a18519a1..47421bedc12b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1502,6 +1502,15 @@ void __free_pages_core(struct page *page, unsigned int order)
  * interleaving within a single pageblock. It is therefore sufficient to check
  * the first and last page of a pageblock and avoid checking each individual
  * page in a pageblock.
+ *
+ * Note: the function may return non-NULL struct page even for a page block
+ * which contains a memory hole (i.e. there is no physical memory for a subset
+ * of the pfn range). For example, if the pageblock order is MAX_ORDER, which
+ * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole
+ * even though the start pfn is online and valid. This should be safe most of
+ * the time because struct pages are still initialized via init_unavailable_range()
+ * and pfn walkers shouldn't touch any physical memory range for which they do
+ * not recognize any specific metadata in struct pages.
+ */
 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
                                      unsigned long end_pfn, struct zone *zone)
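For completeness, the new ksm_disable() entry point above expects to run with the mmap lock held for writing, hence the mmap_assert_write_locked(); elsewhere in this series it replaces the open-coded KSM teardown in s390/gmap. A hypothetical in-kernel caller might look like the sketch below; disable_ksm_for_mm() is a made-up name for illustration, and the declaration is assumed to be exported via <linux/ksm.h> by the rest of the series.

```c
#include <linux/ksm.h>
#include <linux/mm_types.h>
#include <linux/mmap_lock.h>

/* Hypothetical helper: fully disable KSM for an mm, whether merging was
 * enabled per-VMA (MADV_MERGEABLE) or process-wide (PR_SET_MEMORY_MERGE).
 * ksm_disable() unmerges any merged pages and clears VM_MERGEABLE; it can
 * fail if unmerging fails, so the error is propagated to the caller. */
static int disable_ksm_for_mm(struct mm_struct *mm)
{
        int ret;

        mmap_write_lock(mm);
        ret = ksm_disable(mm);
        mmap_write_unlock(mm);

        return ret;
}
```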