linux-stable/include/linux/percpu_counter.h
Qian Cai 7e23452002 percpu_counter: fix a data race at vm_committed_as
"vm_committed_as.count" could be accessed concurrently as reported by
KCSAN,

 BUG: KCSAN: data-race in __vm_enough_memory / percpu_counter_add_batch

 write to 0xffffffff9451c538 of 8 bytes by task 65879 on cpu 35:
  percpu_counter_add_batch+0x83/0xd0
  percpu_counter_add_batch at lib/percpu_counter.c:91
  __vm_enough_memory+0xb9/0x260
  dup_mm+0x3a4/0x8f0
  copy_process+0x2458/0x3240
  _do_fork+0xaa/0x9f0
  __do_sys_clone+0x125/0x160
  __x64_sys_clone+0x70/0x90
  do_syscall_64+0x91/0xb05
  entry_SYSCALL_64_after_hwframe+0x49/0xbe

 read to 0xffffffff9451c538 of 8 bytes by task 66773 on cpu 19:
  __vm_enough_memory+0x199/0x260
  percpu_counter_read_positive at include/linux/percpu_counter.h:81
  (inlined by) __vm_enough_memory at mm/util.c:839
  mmap_region+0x1b2/0xa10
  do_mmap+0x45c/0x700
  vm_mmap_pgoff+0xc0/0x130
  ksys_mmap_pgoff+0x6e/0x300
  __x64_sys_mmap+0x33/0x40
  do_syscall_64+0x91/0xb05
  entry_SYSCALL_64_after_hwframe+0x49/0xbe

The read is outside the percpu_counter::lock critical section, which
results in a data race.  Fix it by adding a READ_ONCE() in
percpu_counter_read_positive(); the READ_ONCE() can also serve in place
of the existing compiler memory barrier.

Signed-off-by: Qian Cai <cai@lca.pw>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Marco Elver <elver@google.com>
Link: http://lkml.kernel.org/r/1582302724-2804-1-git-send-email-cai@lca.pw
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2020-04-07 10:43:43 -07:00
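
For context, a minimal before/after sketch of the fix (assuming the
pre-patch body used a plain load followed by barrier(), as the retained
"Prevent reloads" comment suggests):

        /* Before: a plain read that the compiler may reload or tear */
        s64 ret = fbc->count;

        barrier();      /* Prevent reloads of fbc->count */

        /*
         * After: one marked load.  READ_ONCE() also stops the compiler
         * from refetching fbc->count, so the separate barrier() goes away.
         */
        s64 ret = READ_ONCE(fbc->count);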

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PERCPU_COUNTER_H
#define _LINUX_PERCPU_COUNTER_H
/*
 * A simple "approximate counter" for use in ext2 and ext3 superblocks.
 *
 * WARNING: these things are HUGE.  4 kbytes per counter on 32-way P4.
 */

#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/list.h>
#include <linux/threads.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/gfp.h>

#ifdef CONFIG_SMP

struct percpu_counter {
        raw_spinlock_t lock;
        s64 count;
#ifdef CONFIG_HOTPLUG_CPU
        struct list_head list;  /* All percpu_counters are on a list */
#endif
        s32 __percpu *counters;
};

extern int percpu_counter_batch;

int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
                          struct lock_class_key *key);

#define percpu_counter_init(fbc, value, gfp)                            \
        ({                                                              \
                static struct lock_class_key __key;                     \
                                                                        \
                __percpu_counter_init(fbc, value, gfp, &__key);         \
        })
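
/*
 * Usage sketch (illustrative only; the identifiers below are made up and
 * not part of this header).  A counter is initialized once from sleepable
 * context, updated cheaply from any CPU, and torn down when done:
 *
 *      struct percpu_counter nr_events;
 *
 *      if (percpu_counter_init(&nr_events, 0, GFP_KERNEL))
 *              return -ENOMEM;
 *      percpu_counter_add(&nr_events, 1);
 *      pr_info("events ~= %lld\n", percpu_counter_read_positive(&nr_events));
 *      percpu_counter_destroy(&nr_events);
 */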
void percpu_counter_destroy(struct percpu_counter *fbc);
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
                              s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
}
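
/*
 * Note on batch semantics (a sketch of the lib/percpu_counter.c behaviour):
 * __percpu_counter_compare() first checks the cheap approximate count; only
 * when it is within the maximum possible per-CPU drift of rhs (roughly
 * batch * num_online_cpus()) does it fall back to an exact
 * __percpu_counter_sum().  Illustrative limit check, names made up:
 *
 *      if (percpu_counter_compare(&nr_events, max_events) >= 0)
 *              return -ENOSPC;
 */
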
static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        s64 ret = __percpu_counter_sum(fbc);
        return ret < 0 ? 0 : ret;
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return __percpu_counter_sum(fbc);
}
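
/*
 * Cost/accuracy note: percpu_counter_read() below is a single load and may
 * be stale by up to roughly batch * num_online_cpus(), while
 * percpu_counter_sum() takes fbc->lock and folds in every CPU's pending
 * delta, so it is exact but far more expensive.  Illustrative, with a
 * made-up counter:
 *
 *      s64 fast = percpu_counter_read(&nr_events);     // approximate, O(1)
 *      s64 slow = percpu_counter_sum(&nr_events);      // exact, O(nr_cpus)
 */
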
static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * It is possible for percpu_counter_read() to return a small negative
 * number for some counter which should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        /* Prevent reloads of fbc->count */
        s64 ret = READ_ONCE(fbc->count);

        if (ret >= 0)
                return ret;
        return 0;
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
        return (fbc->counters != NULL);
}

#else /* !CONFIG_SMP */

struct percpu_counter {
        s64 count;
};

static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
                                      gfp_t gfp)
{
        fbc->count = amount;
        return 0;
}

static inline void percpu_counter_destroy(struct percpu_counter *fbc)
{
}

static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
{
        fbc->count = amount;
}

static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
{
        if (fbc->count > rhs)
                return 1;
        else if (fbc->count < rhs)
                return -1;
        else
                return 0;
}

static inline int
__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
{
        return percpu_counter_compare(fbc, rhs);
}

static inline void
percpu_counter_add(struct percpu_counter *fbc, s64 amount)
{
        /* On UP, disabling preemption keeps the read-modify-write atomic. */
        preempt_disable();
        fbc->count += amount;
        preempt_enable();
}

static inline void
percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount, s32 batch)
{
        percpu_counter_add(fbc, amount);
}

static inline s64 percpu_counter_read(struct percpu_counter *fbc)
{
        return fbc->count;
}

/*
 * percpu_counter is intended to track positive numbers. In the UP case the
 * number should never be negative.
 */
static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
{
        return fbc->count;
}

static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
{
        return percpu_counter_read_positive(fbc);
}

static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
{
        return percpu_counter_read(fbc);
}

static inline bool percpu_counter_initialized(struct percpu_counter *fbc)
{
        return true;
}

#endif /* CONFIG_SMP */

static inline void percpu_counter_inc(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, 1);
}

static inline void percpu_counter_dec(struct percpu_counter *fbc)
{
        percpu_counter_add(fbc, -1);
}

static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
{
        percpu_counter_add(fbc, -amount);
}

#endif /* _LINUX_PERCPU_COUNTER_H */