From 97a6c37b34f46feed2544bd40891ee6dd0fd1554 Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Wed, 23 Mar 2011 16:42:27 -0700
Subject: memcg: change page_cgroup_zoneinfo signature

Instead of passing a whole struct page_cgroup to this function, let it
take only what it really needs from it: the struct mem_cgroup and the
page.

This has the advantage that reading pc->mem_cgroup is now done at the
same place where the ordering rules for this pointer are enforced and
explained.

It is also in preparation for removing the pc->page backpointer.

Signed-off-by: Johannes Weiner
Acked-by: KAMEZAWA Hiroyuki
Cc: Daisuke Nishimura
Cc: Balbir Singh
Cc: Minchan Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/memcontrol.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

(limited to 'mm/memcontrol.c')

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5f7b0e1d789c..2881c9ef969a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -364,11 +364,10 @@ struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
 }
 
 static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct page_cgroup *pc)
+page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
 {
-	struct mem_cgroup *mem = pc->mem_cgroup;
-	int nid = page_cgroup_nid(pc);
-	int zid = page_cgroup_zid(pc);
+	int nid = page_to_nid(page);
+	int zid = page_zonenum(page);
 
 	return mem_cgroup_zoneinfo(mem, nid, zid);
 }
@@ -800,7 +799,7 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
 	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
 	 * removed from global LRU.
 	 */
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
 	if (mem_cgroup_is_root(pc->mem_cgroup))
@@ -836,7 +835,7 @@ void mem_cgroup_rotate_reclaimable_page(struct page *page)
 	smp_rmb();
 	if (mem_cgroup_is_root(pc->mem_cgroup))
 		return;
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	list_move_tail(&pc->lru, &mz->lists[lru]);
 }
 
@@ -856,7 +855,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 	smp_rmb();
 	if (mem_cgroup_is_root(pc->mem_cgroup))
 		return;
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	list_move(&pc->lru, &mz->lists[lru]);
 }
 
@@ -873,7 +872,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 		return;
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
 	SetPageCgroupAcctLRU(pc);
@@ -1043,7 +1042,7 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
 		return NULL;
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	if (!mz)
 		return NULL;
 
@@ -2192,7 +2191,7 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
 		 * We hold lru_lock, then, reduce counter directly.
 		 */
 		lru = page_lru(head);
-		mz = page_cgroup_zoneinfo(head_pc);
+		mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
 		MEM_CGROUP_ZSTAT(mz, lru) -= 1;
 	}
 	tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;
--
cgit v1.2.3-58-ga151
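
For readers tracing the ordering argument in the changelog, the snippet below
reassembles one caller, mem_cgroup_rotate_lru_list(), as it reads after this
patch. Only the lines touched by the hunks above are verbatim from the patch;
the function prologue is an illustrative reconstruction of that era's
mm/memcontrol.c and should not be read as part of the change itself.

	/*
	 * Illustrative reconstruction -- the prologue (mem_cgroup_disabled(),
	 * lookup_page_cgroup(), PageCgroupUsed()) is not shown in the diff
	 * context above and is reproduced here only to make the ordering visible.
	 */
	void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
	{
		struct mem_cgroup_per_zone *mz;
		struct page_cgroup *pc;

		if (mem_cgroup_disabled())
			return;

		pc = lookup_page_cgroup(page);
		/* An unused page is never rotated; bail out before trusting pc. */
		if (!PageCgroupUsed(pc))
			return;
		/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
		smp_rmb();
		if (mem_cgroup_is_root(pc->mem_cgroup))
			return;
		/*
		 * pc->mem_cgroup is read here, right after the barrier that makes
		 * it safe, and passed to page_cgroup_zoneinfo() together with the
		 * page; the helper no longer dereferences pc at all.
		 */
		mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
		list_move(&pc->lru, &mz->lists[lru]);
	}

Because the helper now derives nid/zid from the page and receives the
mem_cgroup explicitly, it no longer needs pc->page either, which is what the
changelog means by preparing to remove the backpointer.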