| author | Johannes Weiner <hannes@cmpxchg.org> | 2016-03-15 14:57:22 -0700 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-15 16:55:16 -0700 |
| commit | 62cccb8c8e7a3ca233f49d5e7dcb1557d25465cd (patch) | |
| tree | 43a902faf461c65393a4efebf9ff9622017b92b1 /fs/xfs | |
| parent | 6a93ca8fde3cfce0f00f02281139a377c83e8d8c (diff) | |
mm: simplify lock_page_memcg()
Now that migration doesn't clear page->mem_cgroup of live pages anymore,
it's safe to make lock_page_memcg() and the memcg stat functions take
pages, and spare the callers from memcg objects.
[akpm@linux-foundation.org: fix warnings]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
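To make the interface change concrete, here is a minimal caller-side sketch (not part of the patch): the example_dirty_* wrappers are hypothetical, and the simplified flow omits the mapping->tree_lock handling that the real xfs hunk below performs; only lock_page_memcg(), unlock_page_memcg() and account_page_dirtied() are the interfaces this commit changes.

```c
/*
 * Illustrative sketch only.  The example_dirty_* wrappers are hypothetical;
 * they show how a set_page_dirty-style caller is simplified by this commit.
 * Locking of the mapping's radix tree is omitted for brevity.
 */

/* Before: lock_page_memcg() returned the memcg, and callers threaded it through. */
static void example_dirty_old(struct page *page, struct address_space *mapping)
{
	struct mem_cgroup *memcg;

	memcg = lock_page_memcg(page);
	if (!TestSetPageDirty(page))
		account_page_dirtied(page, mapping, memcg);
	unlock_page_memcg(memcg);
}

/* After: the page is enough; no mem_cgroup object leaks into the caller. */
static void example_dirty_new(struct page *page, struct address_space *mapping)
{
	lock_page_memcg(page);
	if (!TestSetPageDirty(page))
		account_page_dirtied(page, mapping);
	unlock_page_memcg(page);
}
```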
Diffstat (limited to 'fs/xfs')
-rw-r--r-- | fs/xfs/xfs_aops.c | 7 |
1 file changed, 3 insertions, 4 deletions
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 5f85ebc52a98..5c57b7b40728 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1957,7 +1957,6 @@ xfs_vm_set_page_dirty(
 	loff_t			end_offset;
 	loff_t			offset;
 	int			newly_dirty;
-	struct mem_cgroup	*memcg;
 
 	if (unlikely(!mapping))
 		return !TestSetPageDirty(page);
@@ -1981,7 +1980,7 @@ xfs_vm_set_page_dirty(
 	 * Lock out page->mem_cgroup migration to keep PageDirty
 	 * synchronized with per-memcg dirty page counters.
 	 */
-	memcg = lock_page_memcg(page);
+	lock_page_memcg(page);
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
@@ -1992,13 +1991,13 @@ xfs_vm_set_page_dirty(
 		spin_lock_irqsave(&mapping->tree_lock, flags);
 		if (page->mapping) {	/* Race with truncate? */
 			WARN_ON_ONCE(!PageUptodate(page));
-			account_page_dirtied(page, mapping, memcg);
+			account_page_dirtied(page, mapping);
 			radix_tree_tag_set(&mapping->page_tree,
					page_index(page), PAGECACHE_TAG_DIRTY);
 		}
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
 	}
-	unlock_page_memcg(memcg);
+	unlock_page_memcg(page);
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	return newly_dirty;