cgroups: use hierarchy_mutex in memory controller

Update the memory controller to use its hierarchy_mutex rather than
calling cgroup_lock() to protect against cgroup_mkdir()/cgroup_rmdir()
occurring in its hierarchy.

Signed-off-by: Paul Menage <menage@google.com>
Tested-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Paul Menage 2009-01-07 18:08:37 -08:00 committed by Linus Torvalds
parent 999cd8a450
commit 2cb378c862

@@ -154,7 +154,7 @@ struct mem_cgroup {
 	/*
 	 * While reclaiming in a hiearchy, we cache the last child we
-	 * reclaimed from. Protected by cgroup_lock()
+	 * reclaimed from. Protected by hierarchy_mutex
 	 */
 	struct mem_cgroup *last_scanned_child;
 	/*
@@ -615,7 +615,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 /*
  * This routine finds the DFS walk successor. This routine should be
- * called with cgroup_mutex held
+ * called with hierarchy_mutex held
  */
 static struct mem_cgroup *
 mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
@@ -685,7 +685,7 @@ mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
 	/*
 	 * Scan all children under the mem_cgroup mem
 	 */
-	cgroup_lock();
+	mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
 	if (list_empty(&root_mem->css.cgroup->children)) {
 		ret = root_mem;
 		goto done;
@@ -706,7 +706,7 @@ mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
 done:
 	root_mem->last_scanned_child = ret;
-	cgroup_unlock();
+	mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
 	return ret;
 }
@@ -770,18 +770,16 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
 	while (next_mem != root_mem) {
 		if (mem_cgroup_is_obsolete(next_mem)) {
 			mem_cgroup_put(next_mem);
-			cgroup_lock();
 			next_mem = mem_cgroup_get_first_node(root_mem);
-			cgroup_unlock();
 			continue;
 		}
 		ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
 						get_swappiness(next_mem));
 		if (mem_cgroup_check_under_limit(root_mem))
 			return 0;
-		cgroup_lock();
+		mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
 		next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
-		cgroup_unlock();
+		mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
 	}
 	return ret;
 }
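For readers unfamiliar with the locking pattern being swapped in, the sketch below is a rough illustration only: ordinary user-space C with pthreads, not kernel code, and the struct and function names (subsys, node, mem_subsys, get_first_node) are invented for the example. It shows the general shape of the change in this commit: a walk over a subsystem's hierarchy is serialized against concurrent creation and removal of children by taking that subsystem's own hierarchy mutex instead of one global cgroup lock.

#include <pthread.h>
#include <stddef.h>

/* Hypothetical stand-ins for the kernel structures involved. */
struct subsys {
	pthread_mutex_t hierarchy_mutex;	/* guards this subsystem's hierarchy only */
};

struct node {
	struct node *first_child;		/* NULL if the node has no children */
};

static struct subsys mem_subsys = {
	.hierarchy_mutex = PTHREAD_MUTEX_INITIALIZER,
};

/*
 * Return the first child of @root, or @root itself if it has none.
 * Before the change the walk ran under a single global lock; afterwards
 * it takes only the owning subsystem's hierarchy mutex, which is still
 * enough to keep concurrent child creation/removal out of this hierarchy.
 */
static struct node *get_first_node(struct node *root)
{
	struct node *ret;

	pthread_mutex_lock(&mem_subsys.hierarchy_mutex);	/* was: cgroup_lock() */
	ret = root->first_child ? root->first_child : root;
	pthread_mutex_unlock(&mem_subsys.hierarchy_mutex);	/* was: cgroup_unlock() */
	return ret;
}

The narrower lock scope is the point of the patch: mkdir/rmdir in other hierarchies no longer contend with memory-controller reclaim, while mkdir/rmdir in this hierarchy are still excluded for the duration of the walk.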