diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 8cb7c071bd5c..3a44dd1e33d2 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -198,14 +198,21 @@ static inline bool
 percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
 {
 	unsigned long flags;
+	bool good = false;
 	s64 count;
 
+	if (amount == 0)
+		return true;
+
 	local_irq_save(flags);
 	count = fbc->count + amount;
-	if (count <= limit)
+	if ((amount > 0 && count <= limit) ||
+	    (amount < 0 && count >= limit)) {
 		fbc->count = count;
+		good = true;
+	}
 	local_irq_restore(flags);
-	return count <= limit;
+	return good;
 }
 
 /* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 58a3392f471b..44dd133594d4 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -279,8 +279,16 @@ int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
 EXPORT_SYMBOL(__percpu_counter_compare);
 
 /*
- * Compare counter, and add amount if the total is within limit.
- * Return true if amount was added, false if it would exceed limit.
+ * Compare counter, and add amount if total is: less than or equal to limit if
+ * amount is positive, or greater than or equal to limit if amount is negative.
+ * Return true if amount is added, or false if total would be beyond the limit.
+ *
+ * Negative limit is allowed, but unusual.
+ * When negative amounts (subs) are given to percpu_counter_limited_add(),
+ * the limit would most naturally be 0 - but other limits are also allowed.
+ *
+ * Overflow beyond S64_MAX is not allowed for: counter, limit and amount
+ * are all assumed to be sane (far from S64_MIN and S64_MAX).
  */
 bool __percpu_counter_limited_add(struct percpu_counter *fbc,
 				  s64 limit, s64 amount, s32 batch)
@@ -288,10 +296,10 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
 	s64 count;
 	s64 unknown;
 	unsigned long flags;
-	bool good;
+	bool good = false;
 
-	if (amount > limit)
-		return false;
+	if (amount == 0)
+		return true;
 
 	local_irq_save(flags);
 	unknown = batch * num_online_cpus();
@@ -299,7 +307,8 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
 
 	/* Skip taking the lock when safe */
 	if (abs(count + amount) <= batch &&
-	    fbc->count + unknown <= limit) {
+	    ((amount > 0 && fbc->count + unknown <= limit) ||
+	     (amount < 0 && fbc->count - unknown >= limit))) {
 		this_cpu_add(*fbc->counters, amount);
 		local_irq_restore(flags);
 		return true;
@@ -309,7 +318,19 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
 	count = fbc->count + amount;
 
 	/* Skip percpu_counter_sum() when safe */
-	if (count + unknown > limit) {
+	if (amount > 0) {
+		if (count - unknown > limit)
+			goto out;
+		if (count + unknown <= limit)
+			good = true;
+	} else {
+		if (count + unknown < limit)
+			goto out;
+		if (count - unknown >= limit)
+			good = true;
+	}
+
+	if (!good) {
 		s32 *pcount;
 		int cpu;
 
@@ -317,15 +338,20 @@ bool __percpu_counter_limited_add(struct percpu_counter *fbc,
 			pcount = per_cpu_ptr(fbc->counters, cpu);
 			count += *pcount;
 		}
+		if (amount > 0) {
+			if (count > limit)
+				goto out;
+		} else {
+			if (count < limit)
+				goto out;
+		}
+		good = true;
 	}
 
-	good = count <= limit;
-	if (good) {
-		count = __this_cpu_read(*fbc->counters);
-		fbc->count += count + amount;
-		__this_cpu_sub(*fbc->counters, count);
-	}
-
+	count = __this_cpu_read(*fbc->counters);
+	fbc->count += count + amount;
+	__this_cpu_sub(*fbc->counters, count);
+out:
 	raw_spin_unlock(&fbc->lock);
 	local_irq_restore(flags);
 	return good;
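
For illustration, here is a minimal userspace model of the new semantics, mirroring the !SMP variant above (the standalone program and its limited_add() helper are hypothetical, not part of the patch): a positive amount is added only while the total stays <= limit, and a negative amount only while the total stays >= limit.

/* Userspace sketch of the new (UP) semantics; limited_add() is hypothetical. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static int64_t counter;

static bool limited_add(int64_t limit, int64_t amount)
{
	int64_t count;

	if (amount == 0)
		return true;

	count = counter + amount;
	if ((amount > 0 && count <= limit) ||
	    (amount < 0 && count >= limit)) {
		counter = count;
		return true;
	}
	return false;
}

int main(void)
{
	counter = 8;
	printf("%d\n", limited_add(10, 2));	/* 1: 8 + 2 == 10 <= limit 10 */
	printf("%d\n", limited_add(10, 1));	/* 0: 10 + 1 would exceed 10 */
	printf("%d\n", limited_add(0, -4));	/* 1: 10 - 4 == 6 >= limit 0 */
	printf("%d\n", limited_add(0, -7));	/* 0: 6 - 7 would go below 0 */
	printf("counter = %lld\n", (long long)counter);	/* counter = 6 */
	return 0;
}

Compiled and run, this prints 1, 0, 1, 0 and counter = 6: the add beyond 10 and the sub below 0 are both refused and leave the counter unchanged, matching the upper-limit and lower-limit behaviour described in the comment block above.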