author    Matthew Wilcox (Oracle) <willy@infradead.org>    2021-02-28 16:02:57 -0500
committer Matthew Wilcox (Oracle) <willy@infradead.org>    2021-10-18 07:49:39 -0400
commit    d9c08e2232fbca4fa56b9350f7f84835c4aa3add (patch)
tree      0d02fe92c7891f69cbc724732f2e1e43b3d64cf4 /mm
parent    76580b6529db622964b4ffee59ded25efcb73748 (diff)
mm/rmap: Add folio_mkclean()
Transform page_mkclean() into folio_mkclean() and add a page_mkclean()
wrapper around folio_mkclean().

folio_mkclean is 15 bytes smaller than page_mkclean, but the kernel is
enlarged by 33 bytes due to inlining page_folio() into each caller.
This will go away once the callers are converted to use folio_mkclean().

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Howells <dhowells@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
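Editor's note: the page_mkclean() wrapper the message refers to is added outside mm/rmap.c and therefore does not show up in the diffstat below. A minimal sketch of what such a wrapper looks like, assuming it sits alongside the folio_mkclean() declaration in the rmap header and uses the page_folio() helper the message mentions:

static inline int page_mkclean(struct page *page)
{
	/*
	 * page_folio() is inlined into every legacy caller of this
	 * wrapper, which accounts for the 33-byte growth noted above
	 * until callers are converted to folio_mkclean().
	 */
	return folio_mkclean(page_folio(page));
}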
Diffstat (limited to 'mm')
-rw-r--r--  mm/rmap.c  12
1 file changed, 6 insertions, 6 deletions
diff --git a/mm/rmap.c b/mm/rmap.c
index 059556dbefec..3a1059c284c3 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -981,7 +981,7 @@ static bool invalid_mkclean_vma(struct vm_area_struct *vma, void *arg)
 	return true;
 }
 
-int page_mkclean(struct page *page)
+int folio_mkclean(struct folio *folio)
 {
 	int cleaned = 0;
 	struct address_space *mapping;
@@ -991,20 +991,20 @@ int page_mkclean(struct page *page)
 		.invalid_vma = invalid_mkclean_vma,
 	};
 
-	BUG_ON(!PageLocked(page));
+	BUG_ON(!folio_test_locked(folio));
 
-	if (!page_mapped(page))
+	if (!folio_mapped(folio))
 		return 0;
 
-	mapping = page_mapping(page);
+	mapping = folio_mapping(folio);
 	if (!mapping)
 		return 0;
 
-	rmap_walk(page, &rwc);
+	rmap_walk(&folio->page, &rwc);
 	return cleaned;
 }
-EXPORT_SYMBOL_GPL(page_mkclean);
+EXPORT_SYMBOL_GPL(folio_mkclean);
 
 /**
  * page_move_anon_rmap - move a page to our anon_vma
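Editor's note: to illustrate the follow-up the commit message describes (callers being converted to use folio_mkclean() directly), here is a hypothetical call site before and after conversion; the surrounding code is invented for illustration and is not part of this commit:

	/*
	 * Before: the caller passes a page, so page_folio() is inlined
	 * here via the page_mkclean() compatibility wrapper.
	 */
	cleaned = page_mkclean(page);

	/*
	 * After: the caller has been converted to work on folios and
	 * calls folio_mkclean() directly, so the wrapper and its
	 * inlined page_folio() call disappear from this call site.
	 */
	cleaned = folio_mkclean(folio);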