author    | Kirill A. Shutemov <kirill.shutemov@linux.intel.com> | 2016-07-26 15:25:26 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org>       | 2016-07-26 16:19:19 -0700
commit    | dd78fedde4b99b322f2dc849d467d365a82e23ca
tree      | 5095c6e9fd2527a8636188059e35f7f45b9648a2 /mm/rmap.c
parent    | 7267ec008b5cd8b3579e188b1ff238815643e372
rmap: support file thp
Naive approach: on mapping or unmapping the page as compound, we update
->_mapcount on each 4k subpage. That's not efficient, but it's not obvious
how we can optimize this; we can look into optimization later.
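To make the naive approach concrete, the accounting boils down to the loop
below: a simplified sketch of what this patch adds to page_add_file_rmap(),
with the locking, memcg accounting and compound_mapcount update omitted.

	/* Count only subpages whose _mapcount went from -1 to 0, i.e.
	 * became mapped for the first time; only those transitions
	 * affect NR_FILE_MAPPED. */
	int i, nr = 0;
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		if (atomic_inc_and_test(&page[i]._mapcount))
			nr++;
	}
	__mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, nr);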
The PG_double_map optimization doesn't work for file pages, since the
lifecycle of file pages differs from that of anon pages: a file page can be
mapped again at any time.
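For contrast, the anon fast path this refers to looks roughly like the
fragment below, modeled on page_remove_anon_compound_rmap() in this kernel
(simplified): when PG_double_map was never set, no subpage was ever
PTE-mapped, so the per-subpage walk can be skipped entirely.

	if (TestClearPageDoubleMap(page)) {
		/* Subpages may still be PTE-mapped: check one by one. */
		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
			if (atomic_add_negative(-1, &page[i]._mapcount))
				nr++;
		}
	} else {
		/* Only ever PMD-mapped: all subpages unmap at once. */
		nr = HPAGE_PMD_NR;
	}

A file page can gain a new PTE mapping at any point after such a flag would
have been cleared, so this bookkeeping cannot stay consistent for file pages.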
Link: http://lkml.kernel.org/r/1466021202-61880-11-git-send-email-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/rmap.c')
-rw-r--r-- | mm/rmap.c | 48
1 file changed, 35 insertions(+), 13 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 701b93fea2a0..2b336c4277da 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1270,18 +1270,34 @@ void page_add_new_anon_rmap(struct page *page,
  *
  * The caller needs to hold the pte lock.
  */
-void page_add_file_rmap(struct page *page)
+void page_add_file_rmap(struct page *page, bool compound)
 {
+	int i, nr = 1;
+
+	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
 	lock_page_memcg(page);
-	if (atomic_inc_and_test(&page->_mapcount)) {
-		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+	if (compound && PageTransHuge(page)) {
+		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
+			if (atomic_inc_and_test(&page[i]._mapcount))
+				nr++;
+		}
+		if (!atomic_inc_and_test(compound_mapcount_ptr(page)))
+			goto out;
+	} else {
+		if (!atomic_inc_and_test(&page->_mapcount))
+			goto out;
 	}
+	__mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, nr);
+	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+out:
 	unlock_page_memcg(page);
 }
 
-static void page_remove_file_rmap(struct page *page)
+static void page_remove_file_rmap(struct page *page, bool compound)
 {
+	int i, nr = 1;
+
+	VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page);
 	lock_page_memcg(page);
 
 	/* Hugepages are not counted in NR_FILE_MAPPED for now. */
@@ -1292,15 +1308,24 @@ static void page_remove_file_rmap(struct page *page)
 	}
 
 	/* page still mapped by someone else? */
-	if (!atomic_add_negative(-1, &page->_mapcount))
-		goto out;
+	if (compound && PageTransHuge(page)) {
+		for (i = 0, nr = 0; i < HPAGE_PMD_NR; i++) {
+			if (atomic_add_negative(-1, &page[i]._mapcount))
+				nr++;
+		}
+		if (!atomic_add_negative(-1, compound_mapcount_ptr(page)))
+			goto out;
+	} else {
+		if (!atomic_add_negative(-1, &page->_mapcount))
+			goto out;
+	}
 
 	/*
 	 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
 	 * these counters are not modified in interrupt context, and
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
-	__dec_zone_page_state(page, NR_FILE_MAPPED);
+	__mod_zone_page_state(page_zone(page), NR_FILE_MAPPED, -nr);
 	mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 
 	if (unlikely(PageMlocked(page)))
@@ -1356,11 +1381,8 @@ static void page_remove_anon_compound_rmap(struct page *page)
  */
 void page_remove_rmap(struct page *page, bool compound)
 {
-	if (!PageAnon(page)) {
-		VM_BUG_ON_PAGE(compound && !PageHuge(page), page);
-		page_remove_file_rmap(page);
-		return;
-	}
+	if (!PageAnon(page))
+		return page_remove_file_rmap(page, compound);
 
 	if (compound)
 		return page_remove_anon_compound_rmap(page);
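With this in place, a fault path that maps a file THP with a PMD is expected
to call the rmap helpers with compound=true; a hypothetical call site (the
actual callers are wired up elsewhere in this series):

	/* Hypothetical caller, after installing a PMD mapping of a file
	 * THP: bumps compound_mapcount and each subpage's _mapcount. */
	page_add_file_rmap(page, true);

The unmap side goes through page_remove_rmap(page, true), which now forwards
the compound flag to page_remove_file_rmap().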