sched: Fix select_task_rq() vs hotplug issues

Since select_task_rq() is now responsible for guaranteeing that the CPU
it returns is in both ->cpus_allowed and cpu_active_mask, we need to
verify this.

select_task_rq_rt() can blindly return
smp_processor_id()/task_cpu() without checking the valid masks, and
select_task_rq_fair() can do the same in the rare case that all
SD_flags are disabled.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
LKML-Reference: <20091216170517.961475466@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Peter Zijlstra, 2009-12-16 18:04:38 +01:00; committed by Ingo Molnar
parent 3802290628
commit 5da9a0fb67
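
For orientation before the diff: the hunks below add a common select_fallback_rq() helper and use it from both select_task_rq() and move_task_off_dead_cpu(). The following is a minimal userspace model of the fallback order that helper applies (same node first, then any allowed online CPU, then give up gracefully). pick_fallback_cpu(), node_of(), struct mask and the fixed NR_CPUS are illustrative stand-ins, not the kernel's cpumask/cpuset API.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

struct mask { bool cpu[NR_CPUS]; };

/* Hypothetical topology helper: two 4-CPU nodes. */
static int node_of(int cpu) { return cpu / 4; }

static int pick_fallback_cpu(int cpu, const struct mask *allowed,
			     const struct mask *active)
{
	int dest;

	/* 1) Prefer an allowed, online CPU on the same node as 'cpu'. */
	for (dest = 0; dest < NR_CPUS; dest++)
		if (node_of(dest) == node_of(cpu) &&
		    active->cpu[dest] && allowed->cpu[dest])
			return dest;

	/* 2) Otherwise any allowed, online CPU. */
	for (dest = 0; dest < NR_CPUS; dest++)
		if (active->cpu[dest] && allowed->cpu[dest])
			return dest;

	/*
	 * 3) "No more Mr. Nice Guy": the kernel widens p->cpus_allowed via
	 * cpuset_cpus_allowed_locked() and retries; this model simply takes
	 * the first online CPU.
	 */
	for (dest = 0; dest < NR_CPUS; dest++)
		if (active->cpu[dest])
			return dest;

	return -1;	/* no online CPU at all; cannot happen in practice */
}

int main(void)
{
	struct mask allowed = { .cpu = { [6] = true } };	/* bound to CPU 6 */
	struct mask active  = { .cpu = { true, true, true, true } };	/* node 1 offline */

	/* CPU 6 went away: expect a CPU from node 0 instead. */
	printf("fallback: cpu%d\n", pick_fallback_cpu(6, &allowed, &active));
	return 0;
}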


@@ -2317,6 +2317,43 @@ void task_oncpu_function_call(struct task_struct *p,
 }
 
 #ifdef CONFIG_SMP
+static int select_fallback_rq(int cpu, struct task_struct *p)
+{
+	int dest_cpu;
+	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
+
+	/* Look for allowed, online CPU in same node. */
+	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
+		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
+			return dest_cpu;
+
+	/* Any allowed, online CPU? */
+	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
+	if (dest_cpu < nr_cpu_ids)
+		return dest_cpu;
+
+	/* No more Mr. Nice Guy. */
+	if (dest_cpu >= nr_cpu_ids) {
+		rcu_read_lock();
+		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
+		rcu_read_unlock();
+		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
+
+		/*
+		 * Don't tell them about moving exiting tasks or
+		 * kernel threads (both mm NULL), since they never
+		 * leave kernel.
+		 */
+		if (p->mm && printk_ratelimit()) {
+			printk(KERN_INFO "process %d (%s) no "
+			       "longer affine to cpu%d\n",
+			       task_pid_nr(p), p->comm, cpu);
+		}
+	}
+
+	return dest_cpu;
+}
+
 /*
  * Called from:
  *
@@ -2343,14 +2380,8 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
 	 *   not worry about this generic constraint ]
 	 */
 	if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
-		     !cpu_active(cpu))) {
-
-		cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
-		/*
-		 * XXX: race against hot-plug modifying cpu_active_mask
-		 */
-		BUG_ON(cpu >= nr_cpu_ids);
-	}
+		     !cpu_active(cpu)))
+		cpu = select_fallback_rq(task_cpu(p), p);
 
 	return cpu;
 }
@@ -7319,36 +7350,10 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
 	int dest_cpu;
-	const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
 
 again:
-	/* Look for allowed, online CPU in same node. */
-	for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
-		if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
-			goto move;
-
-	/* Any allowed, online CPU? */
-	dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
-	if (dest_cpu < nr_cpu_ids)
-		goto move;
-
-	/* No more Mr. Nice Guy. */
-	if (dest_cpu >= nr_cpu_ids) {
-		cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
-		dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
-
-		/*
-		 * Don't tell them about moving exiting tasks or
-		 * kernel threads (both mm NULL), since they never
-		 * leave kernel.
-		 */
-		if (p->mm && printk_ratelimit()) {
-			pr_info("process %d (%s) no longer affine to cpu%d\n",
-				task_pid_nr(p), p->comm, dead_cpu);
-		}
-	}
+	dest_cpu = select_fallback_rq(dead_cpu, p);
 
-move:
 	/* It can have affinity changed while we were choosing. */
 	if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
 		goto again;
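
To make the caller-side contract concrete, here is a small userspace sketch of the check select_task_rq() now performs on the CPU returned by the class-specific selector: if the pick is not both allowed and active, fall back instead of hitting the old BUG_ON(). verify_cpu(), fallback_cpu() and the bool-array masks are hypothetical stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Illustrative stand-in for select_fallback_rq(): first allowed, active CPU. */
static int fallback_cpu(const bool *allowed, const bool *active)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (allowed[cpu] && active[cpu])
			return cpu;
	return 0;	/* model only; the kernel widens the mask via cpusets */
}

/* Model of the check select_task_rq() now applies to the selector's pick. */
static int verify_cpu(int picked, const bool *allowed, const bool *active)
{
	if (!allowed[picked] || !active[picked])
		picked = fallback_cpu(allowed, active);
	return picked;
}

int main(void)
{
	bool allowed[NR_CPUS] = { [1] = true, [5] = true };
	bool active[NR_CPUS]  = { true, true, true, true, true, false, false, false };

	/* The class selector blindly picked CPU 5, which has gone offline. */
	printf("runnable on cpu%d\n", verify_cpu(5, allowed, active));
	return 0;
}

The same helper backs move_task_off_dead_cpu(), which simply retries (goto again) if the task's affinity changed while the destination was being chosen.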