linux-stable/tools/perf/tests/openat-syscall-all-cpus.c
Andi Kleen 475fb533fb perf evsel: Fix buffer overflow while freeing events
Fix buffer overflow for:

  % perf stat -e msr/tsc/,cstate_core/c7-residency/ true

that causes glibc free list corruption. For some reason it doesn't
trigger in valgrind, but it is visible with AddressSanitizer (ASan):

  =================================================================
  ==32681==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x603000003f5c at pc 0x0000005671ef bp 0x7ffdaaac9ac0 sp 0x7ffdaaac9ab0
  READ of size 4 at 0x603000003f5c thread T0
    #0 0x5671ee in perf_evsel__close_fd util/evsel.c:1196
    #1 0x56c57a in perf_evsel__close util/evsel.c:1717
    #2 0x55ed5f in perf_evlist__close util/evlist.c:1631
    #3 0x4647e1 in __run_perf_stat /home/ak/hle/linux-hle-2.6/tools/perf/builtin-stat.c:749
    #4 0x4648e3 in run_perf_stat /home/ak/hle/linux-hle-2.6/tools/perf/builtin-stat.c:767
    #5 0x46e1bc in cmd_stat /home/ak/hle/linux-hle-2.6/tools/perf/builtin-stat.c:2785
    #6 0x52f83d in run_builtin /home/ak/hle/linux-hle-2.6/tools/perf/perf.c:296
    #7 0x52fd49 in handle_internal_command /home/ak/hle/linux-hle-2.6/tools/perf/perf.c:348
    #8 0x5300de in run_argv /home/ak/hle/linux-hle-2.6/tools/perf/perf.c:392
    #9 0x5308f3 in main /home/ak/hle/linux-hle-2.6/tools/perf/perf.c:530
    #10 0x7f0672d13400 in __libc_start_main (/lib64/libc.so.6+0x20400)
    #11 0x428419 in _start (/home/ak/hle/obj-perf/perf+0x428419)

  0x603000003f5c is located 0 bytes to the right of 28-byte region [0x603000003f40,0x603000003f5c)
  allocated by thread T0 here:
    #0 0x7f0675139020 in calloc (/lib64/libasan.so.3+0xc7020)
    #1 0x648a2d in zalloc util/util.h:23
    #2 0x648a88 in xyarray__new util/xyarray.c:9
    #3 0x566419 in perf_evsel__alloc_fd util/evsel.c:1039
    #4 0x56b427 in perf_evsel__open util/evsel.c:1529
    #5 0x56c620 in perf_evsel__open_per_thread util/evsel.c:1730
    #6 0x461dea in create_perf_stat_counter /home/ak/hle/linux-hle-2.6/tools/perf/builtin-stat.c:263
    #7 0x4637d7 in __run_perf_stat /home/ak/hle/linux-hle-2.6/tools/perf/builtin-stat.c:600
    #8 0x4648e3 in run_perf_stat /home/ak/hle/linux-hle-2.6/tools/perf/builtin-stat.c:767
    #9 0x46e1bc in cmd_stat /home/ak/hle/linux-hle-2.6/tools/perf/builtin-stat.c:2785
    #10 0x52f83d in run_builtin /home/ak/hle/linux-hle-2.6/tools/perf/perf.c:296
    #11 0x52fd49 in handle_internal_command /home/ak/hle/linux-hle-2.6/tools/perf/perf.c:348
    #12 0x5300de in run_argv /home/ak/hle/linux-hle-2.6/tools/perf/perf.c:392
    #13 0x5308f3 in main /home/ak/hle/linux-hle-2.6/tools/perf/perf.c:530
    #14 0x7f0672d13400 in __libc_start_main (/lib64/libc.so.6+0x20400)

The event is allocated with cpus == 1, but freed with cpus == the real
number of CPUs. When the evsel close function walks the file descriptors
it exceeds the fd xyarray boundaries and reads random memory.
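
In other words, the pre-fix close path trusted the ncpus/nthreads values
handed in by the caller rather than the dimensions evsel->fd was actually
allocated with. A simplified sketch of that pattern (illustrative only,
not the exact pre-fix code; FD() is the accessor macro used in evsel.c):

  static void close_fd_prefix_sketch(struct perf_evsel *evsel, int ncpus, int nthreads)
  {
          int cpu, thread;

          /* ncpus/nthreads come from the evlist, not from evsel->fd, so
           * they can be larger than the xyarray dimensions; FD() then
           * reads past the allocation shown in the ASan report above. */
          for (cpu = 0; cpu < ncpus; cpu++)
                  for (thread = 0; thread < nthreads; ++thread) {
                          close(FD(evsel, cpu, thread));
                          FD(evsel, cpu, thread) = -1;
                  }
  }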

v2:

Now that xyarrays save their original dimensions, we can use these to
iterate the two-dimensional fd arrays. Fix some users (close, ioctl) in
evsel.c to use these fields directly. This allows simplifying the code
and dropping quite a few function arguments. Adjust all callers by
removing the unneeded arguments.

The actual perf event reading still uses the original values from the
evsel list.
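
For reference, a minimal sketch of the reworked close path under this
scheme, assuming the xyarray__max_x()/xyarray__max_y() accessors named
in the fix-up note below and the FD() accessor macro from evsel.c:

  void perf_evsel__close_fd(struct perf_evsel *evsel)
  {
          int cpu, thread;

          /* Iterate using the dimensions the fd xyarray was allocated
           * with, instead of ncpus/nthreads passed down by the caller. */
          for (cpu = 0; cpu < xyarray__max_x(evsel->fd); cpu++)
                  for (thread = 0; thread < xyarray__max_y(evsel->fd); ++thread) {
                          close(FD(evsel, cpu, thread));
                          FD(evsel, cpu, thread) = -1;
                  }
  }

The ioctl helper mentioned above follows the same pattern, which is what
lets the ncpus/nthreads arguments be dropped from the callers.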

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Link: http://lkml.kernel.org/r/20170811232634.30465-2-andi@firstfloor.org
[ Fix up xy_max_[xy]() -> xyarray__max_[xy]() ]
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
2017-08-22 11:51:31 -03:00


#include <errno.h>
#include <inttypes.h>
/* For the CPU_* macros */
#include <pthread.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <api/fs/fs.h>
#include <linux/err.h>
#include <api/fs/tracing_path.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"
int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int subtest __maybe_unused)
{
int err = -1, fd, cpu;
struct cpu_map *cpus;
struct perf_evsel *evsel;
unsigned int nr_openat_calls = 111, i;
cpu_set_t cpu_set;
struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
char sbuf[STRERR_BUFSIZE];
char errbuf[BUFSIZ];
if (threads == NULL) {
pr_debug("thread_map__new\n");
return -1;
}
cpus = cpu_map__new(NULL);
if (cpus == NULL) {
pr_debug("cpu_map__new\n");
goto out_thread_map_delete;
}
CPU_ZERO(&cpu_set);
evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
if (IS_ERR(evsel)) {
tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
pr_debug("%s\n", errbuf);
goto out_thread_map_delete;
}
if (perf_evsel__open(evsel, cpus, threads) < 0) {
pr_debug("failed to open counter: %s, "
"tweak /proc/sys/kernel/perf_event_paranoid?\n",
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_evsel_delete;
}
for (cpu = 0; cpu < cpus->nr; ++cpu) {
unsigned int ncalls = nr_openat_calls + cpu;
/*
* XXX eventually lift this restriction in a way that
* keeps perf building on older glibc installations
* without CPU_ALLOC. 1024 cpus in 2010 still seems
* a reasonable upper limit tho :-)
*/
if (cpus->map[cpu] >= CPU_SETSIZE) {
pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
continue;
}
CPU_SET(cpus->map[cpu], &cpu_set);
if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
pr_debug("sched_setaffinity() failed on CPU %d: %s ",
cpus->map[cpu],
str_error_r(errno, sbuf, sizeof(sbuf)));
goto out_close_fd;
}
for (i = 0; i < ncalls; ++i) {
fd = openat(0, "/etc/passwd", O_RDONLY);
close(fd);
}
CPU_CLR(cpus->map[cpu], &cpu_set);
}
/*
* Here we need to explicitly preallocate the counts, as if
* we use the auto allocation it will allocate just for 1 cpu,
* as we start by cpu 0.
*/
if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
goto out_close_fd;
}
err = 0;
for (cpu = 0; cpu < cpus->nr; ++cpu) {
unsigned int expected;
if (cpus->map[cpu] >= CPU_SETSIZE)
continue;
if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
pr_debug("perf_evsel__read_on_cpu\n");
err = -1;
break;
}
expected = nr_openat_calls + cpu;
if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
err = -1;
}
}
perf_evsel__free_counts(evsel);
out_close_fd:
perf_evsel__close_fd(evsel);
out_evsel_delete:
perf_evsel__delete(evsel);
out_thread_map_delete:
thread_map__put(threads);
return err;
}