author		David Hildenbrand <david@redhat.com>	2023-09-13 14:51:13 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2023-10-04 10:32:27 -0700
commit		09c550508a4b8f7844b197cc16877dd0f7c42d8f (patch)
tree		4f953ac989a8846855ac009a5a79d877104d3757
parent		132b180f06a74ddfc526709928036db3b7a1cf6d (diff)
mm/rmap: pass folio to hugepage_add_anon_rmap()
Let's pass a folio; we are always mapping the entire thing.

Link: https://lkml.kernel.org/r/20230913125113.313322-7-david@redhat.com
Signed-off-by: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Muchun Song <muchun.song@linux.dev>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
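[Not part of the patch: a minimal sketch of how a caller that still holds only a struct page would adapt to the new folio-taking signature. The wrapper name example_map_anon_huge is hypothetical; page_folio() is the existing helper for resolving the containing folio.]

/*
 * Illustrative only: resolve the folio once at the call site and pass
 * it to the new interface, mirroring the remove_migration_pte() change
 * below.
 */
static void example_map_anon_huge(struct page *page,
				  struct vm_area_struct *vma,
				  unsigned long address, rmap_t flags)
{
	struct folio *folio = page_folio(page);	/* page -> containing folio */

	/* The folio is always mapped in its entirety. */
	hugepage_add_anon_rmap(folio, vma, address, flags);
}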
-rw-r--r--	include/linux/rmap.h	2
-rw-r--r--	mm/migrate.c	2
-rw-r--r--	mm/rmap.c	8
3 files changed, 5 insertions, 7 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 51cc21ebb568..d22f4d21a11c 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -203,7 +203,7 @@ void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr,
void page_remove_rmap(struct page *, struct vm_area_struct *,
bool compound);
-void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
+void hugepage_add_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
diff --git a/mm/migrate.c b/mm/migrate.c
index 2053b54556ca..eb6bc4053bc4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -249,7 +249,7 @@ static bool remove_migration_pte(struct folio *folio,
pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
if (folio_test_anon(folio))
- hugepage_add_anon_rmap(new, vma, pvmw.address,
+ hugepage_add_anon_rmap(folio, vma, pvmw.address,
rmap_flags);
else
page_dup_file_rmap(new, true);
diff --git a/mm/rmap.c b/mm/rmap.c
index ed4b602bcbd5..d24e2c36372e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2542,18 +2542,16 @@ void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc)
*
* RMAP_COMPOUND is ignored.
*/
-void hugepage_add_anon_rmap(struct page *page, struct vm_area_struct *vma,
+void hugepage_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
unsigned long address, rmap_t flags)
{
- struct folio *folio = page_folio(page);
-
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
atomic_inc(&folio->_entire_mapcount);
if (flags & RMAP_EXCLUSIVE)
- SetPageAnonExclusive(page);
+ SetPageAnonExclusive(&folio->page);
VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
- PageAnonExclusive(page), folio);
+ PageAnonExclusive(&folio->page), folio);
}
void hugepage_add_new_anon_rmap(struct folio *folio,