author		SeongJae Park <sj@kernel.org>			2023-01-09 21:33:33 +0000
committer	Andrew Morton <akpm@linux-foundation.org>	2023-01-18 17:12:58 -0800
commit		af40e35a992ff5691166badd52a4fa2f940c2cea
tree		de95b915c9fc6785ae1511e3e0c29716419dfbba
parent		7477d7560cb2c756d6a8ab165d9ed52537df54e7
mm/damon/paddr: rename 'damon_pa_access_chk_result->page_sz' to 'folio_sz'
DAMON's physical address space monitoring operations set now uses folios.
Rename 'damon_pa_access_chk_result->page_sz' to 'folio_sz' to reflect that fact.
Link: https://lkml.kernel.org/r/20230109213335.62525-5-sj@kernel.org
Signed-off-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 mm/damon/paddr.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 99d4c357ef2b..65c1e0f91535 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -80,7 +80,8 @@ static void damon_pa_prepare_access_checks(struct damon_ctx *ctx)
 }
 
 struct damon_pa_access_chk_result {
-	unsigned long page_sz;
+	/* size of the folio for the access checked physical memory address */
+	unsigned long folio_sz;
 	bool accessed;
 };
 
@@ -91,7 +92,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
 
 	result->accessed = false;
-	result->page_sz = PAGE_SIZE;
+	result->folio_sz = PAGE_SIZE;
 	while (page_vma_mapped_walk(&pvmw)) {
 		addr = pvmw.address;
 		if (pvmw.pte) {
@@ -103,7 +104,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 			result->accessed = pmd_young(*pvmw.pmd) ||
 					!folio_test_idle(folio) ||
 					mmu_notifier_test_young(vma->vm_mm, addr);
-			result->page_sz = HPAGE_PMD_SIZE;
+			result->folio_sz = HPAGE_PMD_SIZE;
 #else
 			WARN_ON_ONCE(1);
 #endif	/* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -118,11 +119,11 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
 	return !result->accessed;
 }
 
-static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
+static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz)
 {
 	struct folio *folio = damon_get_folio(PHYS_PFN(paddr));
 	struct damon_pa_access_chk_result result = {
-		.page_sz = PAGE_SIZE,
+		.folio_sz = PAGE_SIZE,
 		.accessed = false,
 	};
 	struct rmap_walk_control rwc = {
@@ -157,25 +158,25 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
 	folio_put(folio);
 
 out:
-	*page_sz = result.page_sz;
+	*folio_sz = result.folio_sz;
 	return result.accessed;
 }
 
 static void __damon_pa_check_access(struct damon_region *r)
 {
 	static unsigned long last_addr;
-	static unsigned long last_page_sz = PAGE_SIZE;
+	static unsigned long last_folio_sz = PAGE_SIZE;
 	static bool last_accessed;
 
 	/* If the region is in the last checked page, reuse the result */
-	if (ALIGN_DOWN(last_addr, last_page_sz) ==
-				ALIGN_DOWN(r->sampling_addr, last_page_sz)) {
+	if (ALIGN_DOWN(last_addr, last_folio_sz) ==
+				ALIGN_DOWN(r->sampling_addr, last_folio_sz)) {
 		if (last_accessed)
 			r->nr_accesses++;
 		return;
 	}
 
-	last_accessed = damon_pa_young(r->sampling_addr, &last_page_sz);
+	last_accessed = damon_pa_young(r->sampling_addr, &last_folio_sz);
 	if (last_accessed)
 		r->nr_accesses++;
 
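The last hunk is the one where the rename carries semantic weight: __damon_pa_check_access() caches the result of the previous access check and reuses it for any sampling address that falls inside the same (possibly huge) folio, so the cached size must be the folio size rather than PAGE_SIZE. The following is a minimal, self-contained userspace sketch of that reuse pattern, not the kernel code itself: ALIGN_DOWN is open-coded for power-of-two sizes, and check_young() is a hypothetical stub standing in for damon_pa_young().

	#include <stdbool.h>
	#include <stdio.h>

	/* open-coded ALIGN_DOWN(); assumes 'size' is a power of two */
	#define ALIGN_DOWN(addr, size)	((addr) & ~((size) - 1UL))

	#define PAGE_SIZE	4096UL

	/*
	 * Hypothetical stub for damon_pa_young(): reports the address as
	 * accessed and pretends it belongs to a PMD-sized (2 MiB) folio.
	 */
	static bool check_young(unsigned long paddr, unsigned long *folio_sz)
	{
		*folio_sz = 2UL << 20;
		return true;
	}

	/* mirrors the caching pattern of __damon_pa_check_access() */
	static bool check_access(unsigned long addr)
	{
		static unsigned long last_addr;
		static unsigned long last_folio_sz = PAGE_SIZE;
		static bool last_accessed;

		/* same folio as the previous sample: reuse the cached result */
		if (ALIGN_DOWN(last_addr, last_folio_sz) ==
				ALIGN_DOWN(addr, last_folio_sz))
			return last_accessed;

		last_accessed = check_young(addr, &last_folio_sz);
		last_addr = addr;
		return last_accessed;
	}

	int main(void)
	{
		/* 0x200000 and 0x2ff000 share one 2 MiB folio */
		printf("%d\n", (int)check_access(0x200000));	/* real check */
		printf("%d\n", (int)check_access(0x2ff000));	/* cache hit */
		return 0;
	}

With a 2 MiB folio, the second call reuses the first check even though the two addresses are almost a megabyte apart; if the cached size were stuck at PAGE_SIZE, the cache could only ever hit within a single 4 KiB page.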