Diffstat (limited to 'mm')
-rw-r--r--   mm/Kconfig.debug    | 10
-rw-r--r--   mm/damon/paddr.c    | 26
-rw-r--r--   mm/kasan/hw_tags.c  |  4
-rw-r--r--   mm/ksm.c            | 70
-rw-r--r--   mm/mempolicy.c      |  4
-rw-r--r--   mm/page_alloc.c     |  9
6 files changed, 102 insertions(+), 21 deletions(-)
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 6dae63b46368..a925415b4d10 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -274,6 +274,12 @@ config DEBUG_KMEMLEAK_AUTO_SCAN
 config PER_VMA_LOCK_STATS
 	bool "Statistics for per-vma locks"
 	depends on PER_VMA_LOCK
-	default y
 	help
-	  Statistics for per-vma locks.
+	  Say Y here to enable success, retry and failure counters of page
+	  faults handled under protection of per-vma locks. When enabled, the
+	  counters are exposed in /proc/vmstat. This information is useful for
+	  kernel developers to evaluate effectiveness of per-vma locks and to
+	  identify pathological cases. Counting these events introduces a small
+	  overhead in the page fault path.
+
+	  If in doubt, say N.
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index dd9c33fbe805..467b99166b43 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -134,10 +134,8 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
 	}
 
 	need_lock = !folio_test_anon(folio) || folio_test_ksm(folio);
-	if (need_lock && !folio_trylock(folio)) {
-		folio_put(folio);
-		return false;
-	}
+	if (need_lock && !folio_trylock(folio))
+		goto out;
 
 	rmap_walk(folio, &rwc);
 
@@ -238,21 +236,18 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s)
 		if (!folio)
 			continue;
 
-		if (damos_pa_filter_out(s, folio)) {
-			folio_put(folio);
-			continue;
-		}
+		if (damos_pa_filter_out(s, folio))
+			goto put_folio;
 
 		folio_clear_referenced(folio);
 		folio_test_clear_young(folio);
-		if (!folio_isolate_lru(folio)) {
-			folio_put(folio);
-			continue;
-		}
+		if (!folio_isolate_lru(folio))
+			goto put_folio;
 		if (folio_test_unevictable(folio))
 			folio_putback_lru(folio);
 		else
 			list_add(&folio->lru, &folio_list);
+put_folio:
 		folio_put(folio);
 	}
 	applied = reclaim_pages(&folio_list);
@@ -271,16 +266,15 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
 		if (!folio)
 			continue;
 
-		if (damos_pa_filter_out(s, folio)) {
-			folio_put(folio);
-			continue;
-		}
+		if (damos_pa_filter_out(s, folio))
+			goto put_folio;
 
 		if (mark_accessed)
 			folio_mark_accessed(folio);
 		else
 			folio_deactivate(folio);
 		applied += folio_nr_pages(folio);
+put_folio:
 		folio_put(folio);
 	}
 	return applied * PAGE_SIZE;
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index f98b9f4d9d3e..06141bbc1e51 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -285,7 +285,7 @@ static void init_vmalloc_pages(const void *start, unsigned long size)
 	const void *addr;
 
 	for (addr = start; addr < start + size; addr += PAGE_SIZE) {
-		struct page *page = virt_to_page(addr);
+		struct page *page = vmalloc_to_page(addr);
 
 		clear_highpage_kasan_tagged(page);
 	}
@@ -297,7 +297,7 @@ void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
 	u8 tag;
 	unsigned long redzone_start, redzone_size;
 
-	if (!kasan_vmalloc_enabled() || !is_vmalloc_or_module_addr(start)) {
+	if (!kasan_vmalloc_enabled()) {
 		if (flags & KASAN_VMALLOC_INIT)
 			init_vmalloc_pages(start, size);
 		return (void *)start;
 	}
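[Editor's note] The PER_VMA_LOCK_STATS help text above says the counters land in /proc/vmstat. A userspace reader for watching them can be as small as the sketch below; the vma_lock_* counter names are an assumption about what a PER_VMA_LOCK_STATS=y kernel exports, so treat them as illustrative rather than authoritative.

/*
 * Minimal sketch: dump the per-VMA lock counters from /proc/vmstat.
 * The "vma_lock_" prefix is an assumption; the counters only appear
 * on kernels built with PER_VMA_LOCK_STATS=y.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/vmstat", "r");
	char line[128];

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "vma_lock_", 9))
			fputs(line, stdout);	/* e.g. "vma_lock_success 12345" */
	}
	fclose(f);
	return 0;
}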
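[Editor's note] The mm/damon/paddr.c hunks above consolidate duplicated folio_put()-then-continue sequences behind a single put_folio label. Outside the kernel, the same reference-dropping pattern looks like this self-contained sketch; struct item and its helpers are hypothetical stand-ins for folios, not kernel APIs.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for folio lookup and refcounting. */
struct item { int id; int refs; };

static struct item *get_item(int i)
{
	struct item *it = malloc(sizeof(*it));

	it->id = i;
	it->refs = 1;	/* get_item() hands out one reference */
	return it;
}

static void put_item(struct item *it)
{
	if (--it->refs == 0)
		free(it);
}

static bool filter_out(struct item *it) { return it->id % 3 == 0; }
static bool try_claim(struct item *it)  { return it->id % 2 == 0; }

int main(void)
{
	for (int i = 1; i <= 6; i++) {
		struct item *it = get_item(i);

		/*
		 * Mirrors the damon_pa_pageout() rework: each early exit
		 * used to duplicate put_item(); a single label now drops
		 * the reference on every path out of the loop body.
		 */
		if (filter_out(it))
			goto put_item;
		if (!try_claim(it))
			goto put_item;
		printf("processed item %d\n", it->id);
put_item:
		put_item(it);
	}
	return 0;
}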
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -2520,6 +2520,22 @@ static void __ksm_add_vma(struct vm_area_struct *vma)
 		vm_flags_set(vma, VM_MERGEABLE);
 }
 
+static int __ksm_del_vma(struct vm_area_struct *vma)
+{
+	int err;
+
+	if (!(vma->vm_flags & VM_MERGEABLE))
+		return 0;
+
+	if (vma->anon_vma) {
+		err = unmerge_ksm_pages(vma, vma->vm_start, vma->vm_end);
+		if (err)
+			return err;
+	}
+
+	vm_flags_clear(vma, VM_MERGEABLE);
+	return 0;
+}
 /**
  * ksm_add_vma - Mark vma as mergeable if compatible
  *
@@ -2542,6 +2558,20 @@ static void ksm_add_vmas(struct mm_struct *mm)
 		__ksm_add_vma(vma);
 }
 
+static int ksm_del_vmas(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+	int err;
+
+	VMA_ITERATOR(vmi, mm, 0);
+	for_each_vma(vmi, vma) {
+		err = __ksm_del_vma(vma);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
 /**
  * ksm_enable_merge_any - Add mm to mm ksm list and enable merging on all
  *                        compatible VMA's
@@ -2569,6 +2599,46 @@ int ksm_enable_merge_any(struct mm_struct *mm)
 	return 0;
 }
 
+/**
+ * ksm_disable_merge_any - Disable merging on all compatible VMA's of the mm,
+ *                         previously enabled via ksm_enable_merge_any().
+ *
+ * Disabling merging implies unmerging any merged pages, like setting
+ * MADV_UNMERGEABLE would. If unmerging fails, the whole operation fails and
+ * merging on all compatible VMA's remains enabled.
+ *
+ * @mm: Pointer to mm
+ *
+ * Returns 0 on success, otherwise error code
+ */
+int ksm_disable_merge_any(struct mm_struct *mm)
+{
+	int err;
+
+	if (!test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+		return 0;
+
+	err = ksm_del_vmas(mm);
+	if (err) {
+		ksm_add_vmas(mm);
+		return err;
+	}
+
+	clear_bit(MMF_VM_MERGE_ANY, &mm->flags);
+	return 0;
+}
+
+int ksm_disable(struct mm_struct *mm)
+{
+	mmap_assert_write_locked(mm);
+
+	if (!test_bit(MMF_VM_MERGEABLE, &mm->flags))
+		return 0;
+	if (test_bit(MMF_VM_MERGE_ANY, &mm->flags))
+		return ksm_disable_merge_any(mm);
+	return ksm_del_vmas(mm);
+}
+
 int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 		unsigned long end, int advice, unsigned long *vm_flags)
 {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2068b594dc88..1756389a0609 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -808,8 +808,10 @@ static int mbind_range(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		vmstart = vma->vm_start;
 	}
 
-	if (mpol_equal(vma_policy(vma), new_pol))
+	if (mpol_equal(vma_policy(vma), new_pol)) {
+		*prev = vma;
 		return 0;
+	}
 
 	pgoff = vma->vm_pgoff + ((vmstart - vma->vm_start) >> PAGE_SHIFT);
 	merged = vma_merge(vmi, vma->vm_mm, *prev, vmstart, vmend, vma->vm_flags,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9de2a18519a1..47421bedc12b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1502,6 +1502,15 @@ void __free_pages_core(struct page *page, unsigned int order)
  * interleaving within a single pageblock. It is therefore sufficient to check
  * the first and last page of a pageblock and avoid checking each individual
  * page in a pageblock.
+ *
+ * Note: the function may return non-NULL struct page even for a page block
+ * which contains a memory hole (i.e. there is no physical memory for a subset
+ * of the pfn range). For example, if the pageblock order is MAX_ORDER, which
+ * will fall into 2 sub-sections, and the end pfn of the pageblock may be hole
+ * even though the start pfn is online and valid. This should be safe most of
+ * the time because struct pages are still initialized via init_unavailable_range()
+ * and pfn walkers shouldn't touch any physical memory range for which they do
+ * not recognize any specific metadata in struct pages.
  */
 struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
 				     unsigned long end_pfn, struct zone *zone)
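[Editor's note] The new ksm_disable_merge_any()/ksm_disable() complement ksm_enable_merge_any(), which backs the process-wide KSM prctl. A minimal userspace sketch of the round trip follows, assuming a kernel and headers that know PR_SET_MEMORY_MERGE/PR_GET_MEMORY_MERGE; the fallback values below mirror the uapi definitions, but verify them against your own headers.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67	/* assumed uapi value */
#endif
#ifndef PR_GET_MEMORY_MERGE
#define PR_GET_MEMORY_MERGE 68	/* assumed uapi value */
#endif

int main(void)
{
	/* Opt the whole process into KSM (ksm_enable_merge_any() path). */
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0)) {
		perror("PR_SET_MEMORY_MERGE on");
		return 1;
	}
	printf("merge-any: %d\n", prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0));

	/*
	 * Opt back out. This is the path that ends in
	 * ksm_disable_merge_any(): merged pages are unmerged first, and
	 * on failure MMF_VM_MERGE_ANY is left set.
	 */
	if (prctl(PR_SET_MEMORY_MERGE, 0, 0, 0, 0)) {
		perror("PR_SET_MEMORY_MERGE off");
		return 1;
	}
	return 0;
}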
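[Editor's note] The mm/mempolicy.c fix above is about an invariant in mbind_range(): every exit path, including the nothing-to-do one, must leave *prev pointing at the VMA just visited, or the next call hands vma_merge() a stale predecessor. The self-contained analog below shows the same prev-threading contract on a plain linked list; update_node and struct node are hypothetical.

#include <stdio.h>

struct node { int val; struct node *next; };

/*
 * The caller threads "prev" through successive calls; every exit path
 * must advance it, even when the current node needs no change --
 * otherwise the next call sees a stale predecessor (the bug fixed above).
 */
static void update_node(struct node **prev, struct node *cur, int new_val)
{
	if (cur->val == new_val) {	/* nothing to do... */
		*prev = cur;		/* ...but prev must still advance */
		return;
	}
	cur->val = new_val;
	*prev = cur;
}

int main(void)
{
	struct node c = { 2, NULL }, b = { 5, &c }, a = { 5, &b };
	struct node *prev = NULL;

	for (struct node *n = &a; n; n = n->next)
		update_node(&prev, n, 5);
	printf("last visited holds %d\n", prev->val);
	return 0;
}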
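[Editor's note] The new mm/page_alloc.c comment shifts responsibility to pfn walkers: a pageblock for which __pageblock_pfn_to_page() returns non-NULL may still contain a hole whose struct pages were only initialized by init_unavailable_range(). Below is a kernel-context sketch (not a standalone program, and the PageReserved() check as a hole heuristic is an assumption) of a defensively written walker.

/*
 * Kernel-context sketch of a pageblock walker that only touches pages
 * it can positively classify, per the comment above.
 */
static void walk_pageblock(unsigned long start_pfn, unsigned long end_pfn,
			   struct zone *zone)
{
	struct page *page = __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
	unsigned long pfn;

	if (!page)
		return;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *p = pfn_to_page(pfn);

		/*
		 * Pages in a hole were only touched by
		 * init_unavailable_range(); skip anything that carries no
		 * metadata this walker recognizes.
		 */
		if (PageReserved(p))
			continue;
		/* ... inspect p ... */
	}
}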