author     Johannes Weiner <hannes@cmpxchg.org>            2020-12-14 19:06:20 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-12-15 12:13:39 -0800
commit     b8eddff8886b173b0a0f21a3bb1a594cc6d974d1
tree       d51af0968a8043adccb64e3811d13599635e7925
parent     f38d58b7343882f5412a5e5719d9b302f305f2d1
mm: memcontrol: add file_thp, shmem_thp to memory.stat
As huge page usage in the page cache and for shmem files proliferates in
our production environment, the performance monitoring team has asked for
per-cgroup stats on those pages.
We already track and export anon_thp per cgroup. We already track file
THP and shmem THP per node, so making them per-cgroup is only a matter of
switching from node to lruvec counters. All callsites are in places where
the pages are charged and locked, so page->memcg is stable.
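
For illustration only (not part of the patch): at each call site the conversion is a one-line switch from the node-wide counter helpers to the lruvec helpers, which account to both the node and the page's cgroup. The helper function below is hypothetical; the real call sites are in the diff further down.

/*
 * Hypothetical example call site -- a minimal sketch of the node ->
 * lruvec counter switch described above.  Assumes the page is charged
 * and locked, so its memcg pointer cannot change underneath us.
 */
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/vmstat.h>

static void thp_stat_switch_example(struct page *page)
{
	/* Before: per-node statistic only. */
	__inc_node_page_state(page, NR_SHMEM_THPS);

	/* After: lruvec statistic, i.e. per-node and per-memcg. */
	__inc_lruvec_page_state(page, NR_SHMEM_THPS);
}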
[hannes@cmpxchg.org: add documentation]
Link: https://lkml.kernel.org/r/20201026174029.GC548555@cmpxchg.org
Link: https://lkml.kernel.org/r/20201022151844.489337-1-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@surriel.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
 Documentation/admin-guide/cgroup-v2.rst | 8
 mm/filemap.c                            | 4
 mm/huge_memory.c                        | 4
 mm/khugepaged.c                         | 4
 mm/memcontrol.c                         | 6
 mm/shmem.c                              | 2
 6 files changed, 20 insertions, 8 deletions
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst
index 608d7c279396..515bb13084a0 100644
--- a/Documentation/admin-guide/cgroup-v2.rst
+++ b/Documentation/admin-guide/cgroup-v2.rst
@@ -1300,6 +1300,14 @@ PAGE_SIZE multiple when read back.
 		Amount of memory used in anonymous mappings backed by
 		transparent hugepages
 
+	  file_thp
+		Amount of cached filesystem data backed by transparent
+		hugepages
+
+	  shmem_thp
+		Amount of shm, tmpfs, shared anonymous mmap()s backed by
+		transparent hugepages
+
 	  inactive_anon, active_anon, inactive_file, active_file, unevictable
 		Amount of memory, swap-backed and filesystem-backed,
 		on the internal memory management lists used by the
diff --git a/mm/filemap.c b/mm/filemap.c
index 343ba8571ff9..39bb88140680 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -204,9 +204,9 @@ static void unaccount_page_cache_page(struct address_space *mapping,
 	if (PageSwapBacked(page)) {
 		__mod_lruvec_page_state(page, NR_SHMEM, -nr);
 		if (PageTransHuge(page))
-			__dec_node_page_state(page, NR_SHMEM_THPS);
+			__dec_lruvec_page_state(page, NR_SHMEM_THPS);
 	} else if (PageTransHuge(page)) {
-		__dec_node_page_state(page, NR_FILE_THPS);
+		__dec_lruvec_page_state(page, NR_FILE_THPS);
 		filemap_nr_thps_dec(mapping);
 	}
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ec2bb93f7431..42b18d461086 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2710,9 +2710,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		spin_unlock(&ds_queue->split_queue_lock);
 		if (mapping) {
 			if (PageSwapBacked(head))
-				__dec_node_page_state(head, NR_SHMEM_THPS);
+				__dec_lruvec_page_state(head, NR_SHMEM_THPS);
 			else
-				__dec_node_page_state(head, NR_FILE_THPS);
+				__dec_lruvec_page_state(head, NR_FILE_THPS);
 		}
 
 		__split_huge_page(page, list, end, flags);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 4e3dff13eb70..757292532767 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1845,9 +1845,9 @@ out_unlock:
 	}
 
 	if (is_shmem)
-		__inc_node_page_state(new_page, NR_SHMEM_THPS);
+		__inc_lruvec_page_state(new_page, NR_SHMEM_THPS);
 	else {
-		__inc_node_page_state(new_page, NR_FILE_THPS);
+		__inc_lruvec_page_state(new_page, NR_FILE_THPS);
 		filemap_nr_thps_inc(mapping);
 	}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 29459a6ce1c7..c3654510fb70 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1512,6 +1512,8 @@ static struct memory_stat memory_stats[] = {
 	 * constant(e.g. powerpc).
 	 */
 	{ "anon_thp", 0, NR_ANON_THPS },
+	{ "file_thp", 0, NR_FILE_THPS },
+	{ "shmem_thp", 0, NR_SHMEM_THPS },
 #endif
 	{ "inactive_anon", PAGE_SIZE, NR_INACTIVE_ANON },
 	{ "active_anon", PAGE_SIZE, NR_ACTIVE_ANON },
@@ -1542,7 +1544,9 @@ static int __init memory_stats_init(void)
 
 	for (i = 0; i < ARRAY_SIZE(memory_stats); i++) {
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-		if (memory_stats[i].idx == NR_ANON_THPS)
+		if (memory_stats[i].idx == NR_ANON_THPS ||
+		    memory_stats[i].idx == NR_FILE_THPS ||
+		    memory_stats[i].idx == NR_SHMEM_THPS)
 			memory_stats[i].ratio = HPAGE_PMD_SIZE;
 #endif
 		VM_BUG_ON(!memory_stats[i].ratio);
diff --git a/mm/shmem.c b/mm/shmem.c
index b7361fce50bc..67ff829e2f0e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -713,7 +713,7 @@ next:
 	}
 	if (PageTransHuge(page)) {
 		count_vm_event(THP_FILE_ALLOC);
-		__inc_node_page_state(page, NR_SHMEM_THPS);
+		__inc_lruvec_page_state(page, NR_SHMEM_THPS);
 	}
 	mapping->nrpages += nr;
 	__mod_lruvec_page_state(page, NR_FILE_PAGES, nr);
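
As a usage illustration (not from the patch): a minimal userspace sketch that reads the new counters from a cgroup2 memory.stat file. The cgroup path is an assumption for the example; with the ratios set above, the reported values are in bytes.

/*
 * Hypothetical reader for the new per-cgroup THP counters.  The cgroup
 * path below is an assumption for illustration only.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/example/memory.stat", "r");
	char line[256];

	if (!f) {
		perror("memory.stat");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Print anon_thp, file_thp and shmem_thp (values in bytes). */
		if (!strncmp(line, "anon_thp ", 9) ||
		    !strncmp(line, "file_thp ", 9) ||
		    !strncmp(line, "shmem_thp ", 10))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}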