percpu_counter: percpu_counter_hotcpu_callback() cleanup
In commit ebd8fef304 ("percpu_counter: make percpu_counters_lock irq-safe"),
we disabled irqs in percpu_counter_hotcpu_callback().
Since irqs are therefore already disabled when the per-counter locks are taken,
we can grab every counter spinlock without having to disable irqs again.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent: 44b4b461a0
commit: aaf0f2fa68
@@ -176,13 +176,12 @@ static int percpu_counter_cpu_dead(unsigned int cpu)
 	spin_lock_irq(&percpu_counters_lock);
 	list_for_each_entry(fbc, &percpu_counters, list) {
 		s32 *pcount;
-		unsigned long flags;
 
-		raw_spin_lock_irqsave(&fbc->lock, flags);
+		raw_spin_lock(&fbc->lock);
 		pcount = per_cpu_ptr(fbc->counters, cpu);
 		fbc->count += *pcount;
 		*pcount = 0;
-		raw_spin_unlock_irqrestore(&fbc->lock, flags);
+		raw_spin_unlock(&fbc->lock);
 	}
 	spin_unlock_irq(&percpu_counters_lock);
 #endif
|
|
Loading…
Reference in New Issue