hugetlb/cgroup: assign the page's hugetlb cgroup when we move the page to the active list.
A page's hugetlb cgroup assignment and its movement to the active list
should happen together, with hugetlb_lock held across both. Otherwise,
when we remove the hugetlb cgroup, the removal path iterates the hstate
active list and can find pages whose hugetlb cgroup is still NULL.
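
The removal-side walk that trips over this looks roughly like the
following (a simplified sketch of the hugetlb_cgroup pre-destroy path;
the actual code in mm/hugetlb_cgroup.c carries retry and error
handling):

	for_each_hstate(h) {
		spin_lock(&hugetlb_lock);
		/*
		 * Every page on the active list is assumed to carry a
		 * valid hugetlb cgroup pointer at this point.
		 */
		list_for_each_entry(page, &h->hugepage_activelist, lru)
			hugetlb_cgroup_move_parent(idx, cgroup, page);
		spin_unlock(&hugetlb_lock);
		idx++;
	}

Because the old code dropped hugetlb_lock between placing the page on
the active list and assigning its cgroup, this walk could run inside
that window and observe a NULL hugetlb cgroup.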
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 680e481..9834a01 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -213,6 +213,7 @@
 	return ret;
 }
 
+/* Should be called with hugetlb_lock held */
 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 				  struct hugetlb_cgroup *h_cg,
 				  struct page *page)
@@ -220,9 +221,7 @@
 	if (hugetlb_cgroup_disabled() || !h_cg)
 		return;
 
-	spin_lock(&hugetlb_lock);
 	set_hugetlb_cgroup(page, h_cg);
-	spin_unlock(&hugetlb_lock);
 	return;
 }
 
@@ -389,6 +388,7 @@
 void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 {
 	struct hugetlb_cgroup *h_cg;
+	struct hstate *h = page_hstate(oldhpage);
 
 	if (hugetlb_cgroup_disabled())
 		return;
@@ -401,6 +401,7 @@
 
 	/* move the h_cg details to new cgroup */
 	set_hugetlb_cgroup(newhpage, h_cg);
+	list_move(&newhpage->lru, &h->hugepage_activelist);
 	spin_unlock(&hugetlb_lock);
 	cgroup_release_and_wakeup_rmdir(&h_cg->css);
 	return;
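
With hugetlb_cgroup_commit_charge() no longer taking the lock itself,
callers must hold hugetlb_lock across both the dequeue (which places
the page on the active list) and the charge commit. A sketch of the
intended caller-side shape (simplified; the real alloc_huge_page flow
in mm/hugetlb.c has more error handling, and idx/h_cg come from the
earlier hugetlb_cgroup_charge_cgroup() step):

	spin_lock(&hugetlb_lock);
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
	if (page) {
		/*
		 * The dequeue above already moved the page onto
		 * h->hugepage_activelist; assign its cgroup before
		 * dropping the lock so the rmdir walk never sees NULL.
		 */
		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
					     h_cg, page);
	}
	spin_unlock(&hugetlb_lock);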