author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2012-07-31 16:42:21 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-07-31 18:42:41 -0700
commit | da1def55919f4852c4759249a78d63a0c5d2d8f9 (patch)
tree | 9397a43deef85cf9c7f94671593454d77605d2b2 /mm
parent | 6d76dcf40405144a448040a350fd214ddc243d5e (diff)
hugetlb/cgroup: add support for cgroup removal
Add support for cgroup removal. If we don't have a parent cgroup, the
charges are moved to the root cgroup.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
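
For context, a hypothetical userspace view of what this patch changes: before it, rmdir() on a hugetlb cgroup always failed with EBUSY; after it, removal succeeds once no tasks or child groups remain, and any outstanding page charges are reparented. This is only an illustrative sketch, not part of the patch; the cgroup v1 mount point, the "demo" group name, and the hugetlb.2MB.usage_in_bytes control file are assumptions to adjust for the system at hand.

```c
/*
 * Hypothetical demonstration of hugetlb cgroup removal semantics.
 * Assumes a cgroup v1 hugetlb hierarchy at /sys/fs/cgroup/hugetlb
 * and 2MB huge pages; both are assumptions, not defined by this commit.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

static long read_usage(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	const char *child = "/sys/fs/cgroup/hugetlb/demo";

	if (mkdir(child, 0755) && errno != EEXIST) {
		perror("mkdir");
		return 1;
	}

	/*
	 * ...omitted: move a task into "demo", fault in some huge pages,
	 * then move the task back to the root group.  The page charges
	 * stay with "demo" because hugetlb charges follow the page, not
	 * the task...
	 */

	printf("child usage before rmdir: %ld\n",
	       read_usage("/sys/fs/cgroup/hugetlb/demo/hugetlb.2MB.usage_in_bytes"));

	/* Before this patch, pre_destroy always returned -EBUSY here. */
	if (rmdir(child))
		perror("rmdir");

	/* After removal, the charge is accounted to the parent (root) group. */
	printf("root usage after rmdir:   %ld\n",
	       read_usage("/sys/fs/cgroup/hugetlb/hugetlb.2MB.usage_in_bytes"));
	return 0;
}
```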
Diffstat (limited to 'mm')
-rw-r--r-- | mm/hugetlb_cgroup.c | 70
1 file changed, 68 insertions, 2 deletions
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 63e04cfa437d..bc518bedea98 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -105,10 +105,76 @@ static void hugetlb_cgroup_destroy(struct cgroup *cgroup)
 	kfree(h_cgroup);
 }
 
+
+/*
+ * Should be called with hugetlb_lock held.
+ * Since we are holding hugetlb_lock, pages cannot get moved from
+ * active list or uncharged from the cgroup, So no need to get
+ * page reference and test for page active here. This function
+ * cannot fail.
+ */
+static void hugetlb_cgroup_move_parent(int idx, struct cgroup *cgroup,
+				       struct page *page)
+{
+	int csize;
+	struct res_counter *counter;
+	struct res_counter *fail_res;
+	struct hugetlb_cgroup *page_hcg;
+	struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_cgroup(cgroup);
+	struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(cgroup);
+
+	page_hcg = hugetlb_cgroup_from_page(page);
+	/*
+	 * We can have pages in active list without any cgroup
+	 * ie, hugepage with less than 3 pages. We can safely
+	 * ignore those pages.
+	 */
+	if (!page_hcg || page_hcg != h_cg)
+		goto out;
+
+	csize = PAGE_SIZE << compound_order(page);
+	if (!parent) {
+		parent = root_h_cgroup;
+		/* root has no limit */
+		res_counter_charge_nofail(&parent->hugepage[idx],
+					  csize, &fail_res);
+	}
+	counter = &h_cg->hugepage[idx];
+	res_counter_uncharge_until(counter, counter->parent, csize);
+
+	set_hugetlb_cgroup(page, parent);
+out:
+	return;
+}
+
+/*
+ * Force the hugetlb cgroup to empty the hugetlb resources by moving them to
+ * the parent cgroup.
+ */
 static int hugetlb_cgroup_pre_destroy(struct cgroup *cgroup)
 {
-	/* We will add the cgroup removal support in later patches */
-	return -EBUSY;
+	struct hstate *h;
+	struct page *page;
+	int ret = 0, idx = 0;
+
+	do {
+		if (cgroup_task_count(cgroup) ||
+		    !list_empty(&cgroup->children)) {
+			ret = -EBUSY;
+			goto out;
+		}
+		for_each_hstate(h) {
+			spin_lock(&hugetlb_lock);
+			list_for_each_entry(page, &h->hugepage_activelist, lru)
+				hugetlb_cgroup_move_parent(idx, cgroup, page);
+
+			spin_unlock(&hugetlb_lock);
+			idx++;
+		}
+		cond_resched();
+	} while (hugetlb_cgroup_have_usage(cgroup));
+out:
+	return ret;
 }
 
 int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages,
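
To make the accounting rule in hugetlb_cgroup_move_parent() easier to follow, here is a minimal standalone sketch. The structs and helpers below are toy stand-ins, not the kernel's res_counter API; they only model the rule the patch implements: uncharge the cgroup's counter up to its parent, and when there is no parent, force-charge the root group, which has no limit.

```c
#include <stdio.h>

/* Toy stand-in for struct res_counter: a usage count plus a parent pointer. */
struct counter {
	long usage;
	struct counter *parent;
};

/*
 * Remove @amount from @c and every ancestor below @stop (in the spirit of
 * res_counter_uncharge_until(), without locking or limit handling).
 */
static void uncharge_until(struct counter *c, struct counter *stop, long amount)
{
	for (; c && c != stop; c = c->parent)
		c->usage -= amount;
}

/* The root group has no limit, so this "charge" cannot fail. */
static void charge_nofail(struct counter *c, long amount)
{
	c->usage += amount;
}

/* Mirror of the patch's reparenting step for one charged huge page. */
static void move_to_parent(struct counter *child, struct counter *root, long csize)
{
	struct counter *parent = child->parent;

	if (!parent) {
		parent = root;		/* no parent: charges go to root */
		charge_nofail(parent, csize);
	}
	/* Drop the charge from @child; @parent keeps (or just gained) it. */
	uncharge_until(child, parent, csize);
}

int main(void)
{
	long csize = 2L * 1024 * 1024;	/* one 2MB huge page */

	/*
	 * Case 1: cgroup with a real parent.  Hierarchical charging means
	 * the parent already accounts the page, so only the child drops.
	 */
	struct counter parent = { .usage = csize, .parent = NULL };
	struct counter child  = { .usage = csize, .parent = &parent };
	move_to_parent(&child, &parent, csize);
	printf("with parent: child=%ld parent=%ld\n", child.usage, parent.usage);

	/* Case 2: no parent.  The charge is force-moved to the root group. */
	struct counter root   = { .usage = 0, .parent = NULL };
	struct counter orphan = { .usage = csize, .parent = NULL };
	move_to_parent(&orphan, &root, csize);
	printf("no parent:   child=%ld root=%ld\n", orphan.usage, root.usage);
	return 0;
}
```

In the patch itself, this per-page step runs under hugetlb_lock for each hstate's active list, and hugetlb_cgroup_pre_destroy() repeats the whole pass (with cond_resched() between passes) until hugetlb_cgroup_have_usage() reports the group empty.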