cpuset: fix unchecked calls to NODEMASK_ALLOC()

The functions that use NODEMASK_ALLOC() have no way to propagate an
errno to userspace, so when the allocation fails they fail silently.

Fix this by giving each of those functions a static nodemask_t
variable instead; the variables are protected by cgroup_mutex.
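
For reference, NODEMASK_ALLOC() exists because a nodemask_t can be too
big for the stack on large-NODES_SHIFT configs.  A simplified sketch of
its two variants (illustrative only; the exact threshold and expansion
in include/linux/nodemask.h may differ):

	#if NODES_SHIFT > 8	/* nodemask_t too large for the stack */
	#define NODEMASK_ALLOC(type, name, gfp_flags)	\
		type *name = kmalloc(sizeof(*name), gfp_flags)
	#define NODEMASK_FREE(m)	kfree(m)
	#else	/* small mask: plain stack variable, cannot fail */
	#define NODEMASK_ALLOC(type, name, gfp_flags)	\
		type _##name, *name = &_##name
	#define NODEMASK_FREE(m)	do {} while (0)
	#endif

On the kmalloc() path the allocation can return NULL, and the functions
patched below could only bail out silently.  All of them run with
cgroup_mutex held, so a single static nodemask_t per function is safe:
the mutex serializes every user of the variable, and the allocation,
along with its silent failure path, goes away.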

[akpm@linux-foundation.org: fix comment spelling, strengthen cgroup_lock comment]
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Cc: Paul Menage <menage@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c8163ca8af
commit ee24d37977

--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1015,17 +1015,12 @@ static void cpuset_change_nodemask(struct task_struct *p,
 	struct cpuset *cs;
 	int migrate;
 	const nodemask_t *oldmem = scan->data;
-	NODEMASK_ALLOC(nodemask_t, newmems, GFP_KERNEL);
-
-	if (!newmems)
-		return;
+	static nodemask_t newmems;	/* protected by cgroup_mutex */
 
 	cs = cgroup_cs(scan->cg);
-	guarantee_online_mems(cs, newmems);
+	guarantee_online_mems(cs, &newmems);
 
-	cpuset_change_task_nodemask(p, newmems);
-
-	NODEMASK_FREE(newmems);
+	cpuset_change_task_nodemask(p, &newmems);
 
 	mm = get_task_mm(p);
 	if (!mm)
@@ -1438,41 +1433,35 @@ static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont,
 	struct mm_struct *mm;
 	struct cpuset *cs = cgroup_cs(cont);
 	struct cpuset *oldcs = cgroup_cs(oldcont);
-	NODEMASK_ALLOC(nodemask_t, to, GFP_KERNEL);
-
-	if (to == NULL)
-		goto alloc_fail;
+	static nodemask_t to;	/* protected by cgroup_mutex */
 
 	if (cs == &top_cpuset) {
 		cpumask_copy(cpus_attach, cpu_possible_mask);
 	} else {
 		guarantee_online_cpus(cs, cpus_attach);
 	}
-	guarantee_online_mems(cs, to);
+	guarantee_online_mems(cs, &to);
 
 	/* do per-task migration stuff possibly for each in the threadgroup */
-	cpuset_attach_task(tsk, to, cs);
+	cpuset_attach_task(tsk, &to, cs);
 	if (threadgroup) {
 		struct task_struct *c;
 		rcu_read_lock();
 		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
-			cpuset_attach_task(c, to, cs);
+			cpuset_attach_task(c, &to, cs);
 		}
 		rcu_read_unlock();
 	}
 
 	/* change mm; only needs to be done once even if threadgroup */
-	*to = cs->mems_allowed;
+	to = cs->mems_allowed;
 	mm = get_task_mm(tsk);
 	if (mm) {
-		mpol_rebind_mm(mm, to);
+		mpol_rebind_mm(mm, &to);
 		if (is_memory_migrate(cs))
-			cpuset_migrate_mm(mm, &oldcs->mems_allowed, to);
+			cpuset_migrate_mm(mm, &oldcs->mems_allowed, &to);
 		mmput(mm);
 	}
-
-alloc_fail:
-	NODEMASK_FREE(to);
 }
 
 /* The various types of files and directories in a cpuset file system */
@@ -2055,10 +2044,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 	struct cpuset *cp;	/* scans cpusets being updated */
 	struct cpuset *child;	/* scans child cpusets of cp */
 	struct cgroup *cont;
-	NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);
-
-	if (oldmems == NULL)
-		return;
+	static nodemask_t oldmems;	/* protected by cgroup_mutex */
 
 	list_add_tail((struct list_head *)&root->stack_list, &queue);
@@ -2075,7 +2061,7 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 		    nodes_subset(cp->mems_allowed, node_states[N_HIGH_MEMORY]))
 			continue;
 
-		*oldmems = cp->mems_allowed;
+		oldmems = cp->mems_allowed;
 
 		/* Remove offline cpus and mems from this cpuset. */
 		mutex_lock(&callback_mutex);
@@ -2091,10 +2077,9 @@ static void scan_for_empty_cpusets(struct cpuset *root)
 			remove_tasks_in_empty_cpuset(cp);
 		else {
 			update_tasks_cpumask(cp, NULL);
-			update_tasks_nodemask(cp, oldmems, NULL);
+			update_tasks_nodemask(cp, &oldmems, NULL);
 		}
 	}
-	NODEMASK_FREE(oldmems);
 }
 
 /*
@@ -2136,19 +2121,16 @@ void cpuset_update_active_cpus(void)
 static int cpuset_track_online_nodes(struct notifier_block *self,
 				unsigned long action, void *arg)
 {
-	NODEMASK_ALLOC(nodemask_t, oldmems, GFP_KERNEL);
-
-	if (oldmems == NULL)
-		return NOTIFY_DONE;
+	static nodemask_t oldmems;	/* protected by cgroup_mutex */
 
 	cgroup_lock();
 	switch (action) {
 	case MEM_ONLINE:
-		*oldmems = top_cpuset.mems_allowed;
+		oldmems = top_cpuset.mems_allowed;
 		mutex_lock(&callback_mutex);
 		top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY];
 		mutex_unlock(&callback_mutex);
-		update_tasks_nodemask(&top_cpuset, oldmems, NULL);
+		update_tasks_nodemask(&top_cpuset, &oldmems, NULL);
 		break;
 	case MEM_OFFLINE:
 		/*
@@ -2162,7 +2144,6 @@ static int cpuset_track_online_nodes(struct notifier_block *self,
 	}
 
 	cgroup_unlock();
-	NODEMASK_FREE(oldmems);
 	return NOTIFY_OK;
 }
 #endif