author	Matthew Wilcox (Oracle) <willy@infradead.org>	2024-03-28 22:58:29 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2024-04-25 20:56:31 -0700
commit	e0abfbb67142448d57d7841b749d35981a0b92c7
tree	69f6d6f8fee17debc59114adbb49081bf7a834ba
parent	412ad5fbe9285fd8066d3b977db0cd7fb39f671d
mm: rename vma_pgoff_address back to vma_address
With all callers converted, we can use the nice shorter name. Take this
opportunity to reorder the arguments to the logical order (larger object
first).
Link: https://lkml.kernel.org/r/20240328225831.1765286-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
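
To make the "larger object first" reordering concrete, below is a minimal userspace sketch of the new calling convention. It is not the kernel implementation: vm_area_struct is stubbed to the fields used here, PAGE_SHIFT is assumed to be 12, and the body only approximates what the real vma_address() in mm/internal.h computes.

/*
 * Sketch only, under the assumptions above -- not the mm/internal.h code.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define SKETCH_EFAULT	14	/* stand-in for the kernel's -EFAULT */

typedef unsigned long pgoff_t;

struct vm_area_struct {		/* stubbed: only the fields used below */
	unsigned long vm_start;
	unsigned long vm_end;
	pgoff_t vm_pgoff;
};

/* New argument order: the larger object (the VMA) comes first. */
static unsigned long vma_address(struct vm_area_struct *vma,
				 pgoff_t pgoff, unsigned long nr_pages)
{
	unsigned long pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	/*
	 * Per the kernel-doc: if any page in the range is mapped by this
	 * VMA, return the first address where any of these pages appear.
	 */
	if (pgoff + nr_pages > vma->vm_pgoff &&
	    pgoff < vma->vm_pgoff + pages) {
		if (pgoff < vma->vm_pgoff)
			pgoff = vma->vm_pgoff;	/* clamp to the VMA start */
		return vma->vm_start +
			((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	}
	return (unsigned long)-SKETCH_EFAULT;	/* mirrors the -EFAULT case */
}

int main(void)
{
	/* A 4-page VMA at 0x1000 backing object pages 2..5. */
	struct vm_area_struct vma = { 0x1000, 0x5000, 2 };

	printf("0x%lx\n", vma_address(&vma, 3, 1));	/* prints 0x2000 */
	return 0;
}

With the VMA leading, calls such as vma_address(vma, pgoff, 1) read the same way as the other rmap helpers that take the containing object first, which is the "logical order" the message refers to.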
 mm/internal.h        |  9 ++++-----
 mm/memory-failure.c  |  2 +-
 mm/page_vma_mapped.c |  2 +-
 mm/rmap.c            | 12 ++++++------
 4 files changed, 12 insertions(+), 13 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index f4ef48d57b1c..d567381b12cc 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -805,17 +805,16 @@ void mlock_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
 /**
- * vma_pgoff_address - Find the virtual address a page range is mapped at
+ * vma_address - Find the virtual address a page range is mapped at
+ * @vma: The vma which maps this object.
  * @pgoff: The page offset within its object.
  * @nr_pages: The number of pages to consider.
- * @vma: The vma which maps this object.
  *
  * If any page in this range is mapped by this VMA, return the first address
  * where any of these pages appear. Otherwise, return -EFAULT.
  */
-static inline unsigned long
-vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
-		struct vm_area_struct *vma)
+static inline unsigned long vma_address(struct vm_area_struct *vma,
+		pgoff_t pgoff, unsigned long nr_pages)
 {
 	unsigned long address;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 9e50586f2e37..0d863e9216af 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -455,7 +455,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
 	tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
 	if (is_zone_device_page(p)) {
 		if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
-			tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
+			tk->addr = vma_address(vma, fsdax_pgoff, 1);
 		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
 	} else
 		tk->size_shift = page_shift(compound_head(p));
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index ac48d6284bad..53b8868ede61 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -334,7 +334,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 		.flags = PVMW_SYNC,
 	};
 
-	pvmw.address = vma_pgoff_address(pgoff, 1, vma);
+	pvmw.address = vma_address(vma, pgoff, 1);
 	if (pvmw.address == -EFAULT)
 		return 0;
 	if (!page_vma_mapped_walk(&pvmw))
diff --git a/mm/rmap.c b/mm/rmap.c
index 4b08b1a06688..56b313aa2ebf 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -794,7 +794,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 
 	/* The !page__anon_vma above handles KSM folios */
 	pgoff = folio->index + folio_page_idx(folio, page);
-	return vma_pgoff_address(pgoff, 1, vma);
+	return vma_address(vma, pgoff, 1);
 }
 
 /*
@@ -1132,7 +1132,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 	if (invalid_mkclean_vma(vma, NULL))
 		return 0;
 
-	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
+	pvmw.address = vma_address(vma, pgoff, nr_pages);
 	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
 
 	return page_vma_mkclean_one(&pvmw);
@@ -2592,8 +2592,8 @@ static void rmap_walk_anon(struct folio *folio,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
 			pgoff_start, pgoff_end) {
 		struct vm_area_struct *vma = avc->vma;
-		unsigned long address = vma_pgoff_address(pgoff_start,
-				folio_nr_pages(folio), vma);
+		unsigned long address = vma_address(vma, pgoff_start,
+				folio_nr_pages(folio));
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 
 		cond_resched();
@@ -2654,8 +2654,8 @@ static void rmap_walk_file(struct folio *folio,
 lookup:
 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 			pgoff_start, pgoff_end) {
-		unsigned long address = vma_pgoff_address(pgoff_start,
-				folio_nr_pages(folio), vma);
+		unsigned long address = vma_address(vma, pgoff_start,
+				folio_nr_pages(folio));
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 
 		cond_resched();