[PATCH] for_each_possible_cpu: fixes for generic part
Replaces for_each_cpu() with for_each_possible_cpu().

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit 0a94502277
parent 631d6747e1
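
Since for_each_cpu() already iterated over every possible CPU, the new name only makes that explicit; none of the hunks below change behaviour. The reason these loops must cover possible (rather than merely online) CPUs is that per-CPU storage exists for every possible CPU. A minimal sketch of the pattern, with invented identifiers (example_count, example_total) used purely for illustration and not taken from this patch:

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* one counter per possible CPU; storage exists for all of them */
static DEFINE_PER_CPU(unsigned long, example_count);

static unsigned long example_total(void)
{
        unsigned long sum = 0;
        int cpu;

        /*
         * Walk every possible CPU, including ones that are currently
         * offline, so no per-CPU slot is missed.
         */
        for_each_possible_cpu(cpu)
                sum += per_cpu(example_count, cpu);

        return sum;
}

Iterating with for_each_online_cpu() here would silently drop the counts held by CPUs that are offline or not yet brought up, which is exactly the ambiguity the clearer name guards against.
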
@@ -3514,7 +3514,7 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
@@ -373,6 +373,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
 void __init files_defer_init(void)
 {
 	int i;
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		fdtable_defer_list_init(i);
 }
@@ -534,7 +534,7 @@ static int show_stat(struct seq_file *p, void *v)
 	if (wall_to_monotonic.tv_nsec)
 		--jif;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		int j;
 
 		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
@@ -19,7 +19,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define percpu_modcopy(pcpudst, src, size)			\
 do {								\
 	unsigned int __i;					\
-	for_each_cpu(__i)					\
+	for_each_possible_cpu(__i)				\
 		memcpy((pcpudst)+__per_cpu_offset[__i],		\
 		       (src), (size));				\
 } while (0)
@@ -152,14 +152,14 @@ struct disk_attribute {
 ({								\
 	typeof(gendiskp->dkstats->field) res = 0;		\
 	int i;							\
-	for_each_cpu(i)						\
+	for_each_possible_cpu(i)				\
 		res += per_cpu_ptr(gendiskp->dkstats, i)->field;\
 	res;							\
 })
 
 static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
 	int i;
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		memset(per_cpu_ptr(gendiskp->dkstats, i), value,
 				sizeof (struct disk_stats));
 }
@@ -46,7 +46,7 @@ static inline int kstat_irqs(int irq)
 {
 	int cpu, sum = 0;
 
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		sum += kstat_cpu(cpu).irqs[irq];
 
 	return sum;
@@ -341,7 +341,7 @@ static void __init setup_per_cpu_areas(void)
 #endif
 	ptr = alloc_bootmem(size * nr_possible_cpus);
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		__per_cpu_offset[i] = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
 		ptr += size;
@@ -301,7 +301,7 @@ rcu_torture_printk(char *page)
 	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
 	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
 
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
 			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
 			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
@@ -535,7 +535,7 @@ rcu_torture_init(void)
 	atomic_set(&n_rcu_torture_error, 0);
 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
 		atomic_set(&rcu_torture_wcount[i], 0);
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
 			per_cpu(rcu_torture_count, cpu)[i] = 0;
 			per_cpu(rcu_torture_batch, cpu)[i] = 0;
@@ -1625,7 +1625,7 @@ unsigned long nr_uninterruptible(void)
 {
 	unsigned long i, sum = 0;
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		sum += cpu_rq(i)->nr_uninterruptible;
 
 	/*
@@ -1642,7 +1642,7 @@ unsigned long long nr_context_switches(void)
 {
 	unsigned long long i, sum = 0;
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		sum += cpu_rq(i)->nr_switches;
 
 	return sum;
@@ -1652,7 +1652,7 @@ unsigned long nr_iowait(void)
 {
 	unsigned long i, sum = 0;
 
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		sum += atomic_read(&cpu_rq(i)->nr_iowait);
 
 	return sum;
@@ -6080,7 +6080,7 @@ void __init sched_init(void)
 	runqueue_t *rq;
 	int i, j, k;
 
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		prio_array_t *array;
 
 		rq = cpu_rq(i);
@@ -3311,7 +3311,7 @@ void *__alloc_percpu(size_t size)
 	 * and we have no way of figuring out how to fix the array
 	 * that we have allocated then....
	 */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		int node = cpu_to_node(i);
 
 		if (node_online(node))
@@ -3398,7 +3398,7 @@ void free_percpu(const void *objp)
 	/*
 	 * We allocate for all cpus so we cannot use for online cpu here.
 	 */
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		kfree(p->ptrs[i]);
 	kfree(p);
 }
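
The comment kept in the hunk above states the invariant this whole series enforces: per-CPU storage is allocated for every possible CPU, so it must also be freed for every possible CPU, whether or not the CPU is online at teardown time. A hedged sketch of that alloc/free pairing follows; struct example_percpu_data, example_alloc_percpu() and example_free_percpu() are invented for illustration and are not the kernel's real percpu bookkeeping:

#include <linux/slab.h>
#include <linux/cpumask.h>

/* illustrative only -- not the kernel's actual percpu_data layout */
struct example_percpu_data {
        void *ptrs[NR_CPUS];            /* one slot per possible CPU */
};

static struct example_percpu_data *example_alloc_percpu(size_t size)
{
        struct example_percpu_data *p = kzalloc(sizeof(*p), GFP_KERNEL);
        int cpu;

        if (!p)
                return NULL;
        /* allocate for every possible CPU up front ... */
        for_each_possible_cpu(cpu)
                p->ptrs[cpu] = kzalloc(size, GFP_KERNEL);
        /* (cleanup of a partially failed allocation omitted for brevity) */
        return p;
}

static void example_free_percpu(struct example_percpu_data *p)
{
        int cpu;

        /* ... so teardown walks the same set, online or not */
        for_each_possible_cpu(cpu)
                kfree(p->ptrs[cpu]);
        kfree(p);
}
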
@@ -512,7 +512,7 @@ long percpu_counter_sum(struct percpu_counter *fbc)
 
 	spin_lock(&fbc->lock);
 	ret = fbc->count;
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		long *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}