mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-09-27 12:57:53 +00:00
percpu_counter: use this_cpu_ptr() instead of per_cpu_ptr()
this_cpu_ptr() avoids an array lookup and can use the percpu offset of the local cpu directly. Signed-off-by: Christoph Lameter <cl@linux.com> Cc: Eric Dumazet <eric.dumazet@gmail.com> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
041b78f232
commit
ea00c30b5b
1 changed file with 3 additions and 3 deletions
|
@@ -73,9 +73,9 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 {
 	s64 count;
 	s32 *pcount;
-	int cpu = get_cpu();
 
-	pcount = per_cpu_ptr(fbc->counters, cpu);
+	preempt_disable();
+	pcount = this_cpu_ptr(fbc->counters);
 	count = *pcount + amount;
 	if (count >= batch || count <= -batch) {
 		spin_lock(&fbc->lock);
@@ -85,7 +85,7 @@ void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch)
 	} else {
 		*pcount = count;
 	}
-	put_cpu();
+	preempt_enable();
 }
 EXPORT_SYMBOL(__percpu_counter_add);
 
Loading…
Reference in a new issue