x86: fix cpu_mask_to_apicid_and to include cpu_online_mask

Impact: fix potential APIC crash

In determining the destination apicid, there are usually three cpumasks
that are considered: the incoming cpumask arg, cfg->domain and the
cpu_online_mask.  Since we are just introducing the cpu_mask_to_apicid_and
function, make sure it includes the cpu_online_mask in its evaluation.
[Added with this patch.]
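
In the fixed-delivery variants below, that evaluation boils down to the
following sketch.  The apicid_of() helper is invented for illustration
and stands in for the per-arch lookup (cpu_to_logical_apicid() or
per_cpu(x86_cpu_to_apicid, cpu)); the cpumask calls are the real API:

	int cpu;

	/* first cpu that is in cpumask, in andmask, and online */
	for_each_cpu_and(cpu, cpumask, andmask)
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			break;
	if (cpu < nr_cpu_ids)		/* found one */
		return apicid_of(cpu);
	return BAD_APICID;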

There are two io_apic.c functions that did not previously use the
cpu_online_mask:  setup_IO_APIC_irq and msi_compose_msg.  Both of these
simply used cpu_mask_to_apicid(cfg->domain & TARGET_CPUS), and all but
one arch (NUMAQ[*]) returns only online cpus in the TARGET_CPUS mask,
so the behavior is identical for all cases.

[*: NUMAQ bug?]

Note that alloc_cpumask_var is only used for the 32-bit cases, where
it's highly likely that NR_CPUS will be small and therefore
CPUMASK_OFFSTACK=n.  But if that's not the case, a failed allocation
simply results in the same return value as the default.
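
For reference, the fallback shape used by those 32-bit variants is
roughly the following sketch, where DEFAULT_APICID is a placeholder for
the arch default (cpu_to_logical_apicid(0) or 0xFF, as the case may be):

	int apicid = DEFAULT_APICID;
	cpumask_var_t cpumask;

	/* with CPUMASK_OFFSTACK=n this cannot fail; if it somehow
	 * does, the caller simply gets the default value back */
	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
		return apicid;

	/* ... compute apicid from cpumask ... */

	free_cpumask_var(cpumask);
	return apicid;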

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Mike Travis <travis@sgi.com>, 2008-12-17 15:21:39 -08:00; committed by Ingo Molnar
commit a775a38b13 (parent 9a3d8f735e)
8 changed files with 52 additions and 41 deletions


@@ -138,7 +138,9 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = cpumask_any_and(cpumask, andmask);
+	for_each_cpu_and(cpu, cpumask, andmask)
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
 	if (cpu < nr_cpu_ids)
 		return cpu_to_logical_apicid(cpu);


@@ -214,51 +214,47 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 	return apicid;
 }
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 						  const struct cpumask *andmask)
 {
 	int num_bits_set;
-	int num_bits_set2;
 	int cpus_found = 0;
 	int cpu;
-	int apicid = 0;
+	int apicid = cpu_to_logical_apicid(0);
+	cpumask_var_t cpumask;
+	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+		return apicid;
+	cpumask_and(cpumask, inmask, andmask);
+	cpumask_and(cpumask, cpumask, cpu_online_mask);
 	num_bits_set = cpumask_weight(cpumask);
-	num_bits_set2 = cpumask_weight(andmask);
-	num_bits_set = min(num_bits_set, num_bits_set2);
 	/* Return id to all */
-	if (num_bits_set >= nr_cpu_ids)
-#if defined CONFIG_ES7000_CLUSTERED_APIC
-		return 0xFF;
-#else
-		return cpu_to_logical_apicid(0);
-#endif
+	if (num_bits_set == NR_CPUS)
+		goto exit;
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = cpumask_first_and(cpumask, andmask);
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpumask_test_cpu(cpu, cpumask) &&
-		    cpumask_test_cpu(cpu, andmask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)) {
-				printk(KERN_WARNING
-					"%s: Not a valid mask!\n", __func__);
-#if defined CONFIG_ES7000_CLUSTERED_APIC
-				return 0xFF;
-#else
+					apicid_cluster(new_apicid)){
+				printk ("%s: Not a valid mask!\n", __func__);
 				return cpu_to_logical_apicid(0);
-#endif
 			}
 			apicid = new_apicid;
 			cpus_found++;
 		}
 		cpu++;
 	}
+exit:
+	free_cpumask_var(cpumask);
 	return apicid;
 }


@@ -72,8 +72,9 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 {
 	unsigned long mask1 = cpumask_bits(cpumask)[0];
 	unsigned long mask2 = cpumask_bits(andmask)[0];
+	unsigned long mask3 = cpumask_bits(cpu_online_mask)[0];
-	return (unsigned int)(mask1 & mask2);
+	return (unsigned int)(mask1 & mask2 & mask3);
 }
 static inline u32 phys_pkg_id(u32 cpuid_apic, int index_msb)

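(The hunk above works because in flat logical mode the destination APIC
ID is itself a bitmask of target cpus, and with NR_CPUS small enough all
three cpumasks fit in the first word of their bitmaps, so the three-way
intersection is just two ANDs.  A standalone toy illustration in plain
C, with invented mask values:)

	#include <stdio.h>

	int main(void)
	{
		/* first words of the three bitmaps, cpumask_bits(m)[0] */
		unsigned long cpumask = 0x0f;	/* requested: cpus 0-3 */
		unsigned long andmask = 0x0e;	/* e.g. cfg->domain: cpus 1-3 */
		unsigned long online  = 0x0b;	/* cpus 0, 1 and 3 online */

		/* flat logical destination = intersection of all three */
		printf("dest = 0x%lx\n", cpumask & andmask & online); /* 0xa */
		return 0;
	}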

@@ -170,35 +170,37 @@ static inline unsigned int cpu_mask_to_apicid(const cpumask_t *cpumask)
 	return apicid;
 }
-static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
+static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *inmask,
 						  const struct cpumask *andmask)
 {
 	int num_bits_set;
-	int num_bits_set2;
 	int cpus_found = 0;
 	int cpu;
-	int apicid = 0;
+	int apicid = 0xFF;
+	cpumask_var_t cpumask;
+	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
+		return (int) 0xFF;
+	cpumask_and(cpumask, inmask, andmask);
+	cpumask_and(cpumask, cpumask, cpu_online_mask);
 	num_bits_set = cpumask_weight(cpumask);
-	num_bits_set2 = cpumask_weight(andmask);
-	num_bits_set = min(num_bits_set, num_bits_set2);
 	/* Return id to all */
-	if (num_bits_set >= nr_cpu_ids)
-		return 0xFF;
+	if (num_bits_set == nr_cpu_ids)
+		goto exit;
 	/*
 	 * The cpus in the mask must all be on the apic cluster. If are not
 	 * on the same apicid cluster return default value of TARGET_CPUS.
 	 */
-	cpu = cpumask_first_and(cpumask, andmask);
+	cpu = cpumask_first(cpumask);
 	apicid = cpu_to_logical_apicid(cpu);
 	while (cpus_found < num_bits_set) {
-		if (cpumask_test_cpu(cpu, cpumask)
-		    && cpumask_test_cpu(cpu, andmask)) {
+		if (cpumask_test_cpu(cpu, cpumask)) {
 			int new_apicid = cpu_to_logical_apicid(cpu);
 			if (apicid_cluster(apicid) !=
-					apicid_cluster(new_apicid)) {
-				printk(KERN_WARNING
-					"%s: Not a valid mask!\n", __func__);
+					apicid_cluster(new_apicid)){
+				printk ("%s: Not a valid mask!\n", __func__);
 				return 0xFF;
 			}
 			apicid = apicid | new_apicid;

@@ -206,6 +208,8 @@ static inline unsigned int cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 		}
 		cpu++;
 	}
+exit:
+	free_cpumask_var(cpumask);
 	return apicid;
 }


@@ -276,7 +276,9 @@ physflat_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = cpumask_any_and(cpumask, andmask);
+	for_each_cpu_and(cpu, cpumask, andmask)
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
 	if (cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	return BAD_APICID;


@@ -133,7 +133,9 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = cpumask_any_and(cpumask, andmask);
+	for_each_cpu_and(cpu, cpumask, andmask)
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
 	if (cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_logical_apicid, cpu);
 	return BAD_APICID;


@@ -132,7 +132,9 @@ static unsigned int x2apic_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = cpumask_any_and(cpumask, andmask);
+	for_each_cpu_and(cpu, cpumask, andmask)
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
 	if (cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	return BAD_APICID;


@@ -188,7 +188,9 @@ static unsigned int uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
 	 * May as well be the first.
 	 */
-	cpu = cpumask_any_and(cpumask, andmask);
+	for_each_cpu_and(cpu, cpumask, andmask)
+		if (cpumask_test_cpu(cpu, cpu_online_mask))
+			break;
 	if (cpu < nr_cpu_ids)
 		return per_cpu(x86_cpu_to_apicid, cpu);
 	return BAD_APICID;