linux-stable/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c
Andrii Nakryiko ff943683f8 selftests/bpf: fix uprobe offset calculation in selftests
Fix how selftests determine the relative offset of a function that is
uprobed. Previously, there was an assumption that the uprobed function
is always in the first executable region, which is not always the case
(libbpf CI hits this case now), so the get_base_addr() approach in
isolation doesn't work anymore. Teach get_uprobe_offset() to determine
the correct memory mapping and calculate the uprobe offset correctly.
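
In outline, the fixed lookup scans /proc/self/maps for the executable
mapping that actually contains the target address and converts that
virtual address into an offset within the backing file. A simplified
sketch (not the verbatim merged helper; names and error handling are
trimmed):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/types.h>

	static ssize_t get_uprobe_offset_sketch(const void *addr)
	{
		size_t start = 0, end = 0, base = 0;
		char perms[5];
		bool found = false;
		FILE *f;

		f = fopen("/proc/self/maps", "r");
		if (!f)
			return -errno;

		/* each line: "start-end perms file_offset dev inode path" */
		while (fscanf(f, "%zx-%zx %4s %zx %*[^\n]\n",
			      &start, &end, perms, &base) == 4) {
			/* executable mapping that covers the target address? */
			if (perms[2] == 'x' &&
			    (uintptr_t)addr >= start && (uintptr_t)addr < end) {
				found = true;
				break;
			}
		}
		fclose(f);

		if (!found)
			return -ESRCH;

		/* file offset = VA - mapping start + mapping's file offset */
		return (uintptr_t)addr - start + base;
	}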

While at it, I merged the two implementations of the
get_uprobe_offset() helper into one, moving the powerpc64-specific
logic inside (I had to add an extra {} block to avoid an
unused-variable error for insn).
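
The extra braces are plain block scoping: the powerpc64-only local gets
its own {} under the arch #ifdef, so it can be declared mid-function
and never exists in builds for other architectures. An illustrative
fragment (the real ELFv2 global-entry-point check is simplified here):

	#include <stdint.h>
	#include <sys/types.h>

	static ssize_t arch_adjusted_offset(const void *addr, size_t start,
					    size_t base)
	{
	#if defined(__powerpc64__) && defined(_CALL_ELF) && _CALL_ELF == 2
		{
			/* 'insn' lives only inside this block */
			const unsigned int *insn =
				(const unsigned int *)(uintptr_t)addr;

			/* PPC64 ELFv2 functions may have a global and a local
			 * entry point; if the first instruction matches the
			 * global-entry prologue (addis r2,r12,...), the probe
			 * belongs at the local entry, 8 bytes further in.
			 */
			if ((*insn & 0xffff0000UL) == 0x3c4c0000UL)
				return (uintptr_t)addr + 8 - start + base;
		}
	#endif
		return (uintptr_t)addr - start + base;
	}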

Also ensured that uprobed functions are never inlined, but are still
static (and thus local to each selftest), by placing a no-op asm
volatile block inside them. I didn't want to keep them global __weak,
because some tests use the uprobe's ref counter offset (to test
USDT-like logic), which is not compatible with a non-refcounted uprobe.
So it's nicer to have each test's uprobe target local to the file and
guaranteed not to be inlined or skipped by the compiler (which can
happen with static functions, especially when compiling selftests with
-O2).
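
For context, the ref counter is the USDT-style semaphore: attaching
with a non-zero ref_ctr_offset makes the kernel bump a counter in the
target process's memory while the probe is live, which presupposes a
refcounted attach point. A hedged sketch of such an attachment, with
ref_ctr_off standing in for a hypothetical offset the test would
compute:

	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts,
		.ref_ctr_offset = ref_ctr_off,	/* hypothetical offset */
		.retprobe = false,
	);
	struct bpf_link *link;

	link = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe,
					       -1 /* any pid */,
					       "/proc/self/exe",
					       uprobe_offset, &opts);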

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20220126193058.3390292-1-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2022-01-26 20:04:01 -08:00

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <test_progs.h>
#include "test_bpf_cookie.skel.h"

/* uprobe attach point */
static void trigger_func(void)
{
	/* no-op volatile asm keeps the compiler from inlining or dropping
	 * this static function, so it has a real address to attach the
	 * uprobe to (see the commit message above)
	 */
	asm volatile ("");
}

static void kprobe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;

	/* attach two kprobes; SYS_NANOSLEEP_KPROBE_NAME is the
	 * arch-specific sys_nanosleep symbol name defined in test_progs.h
	 */
	opts.bpf_cookie = 0x1;
	opts.retprobe = false;
	link1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	opts.bpf_cookie = 0x2;
	opts.retprobe = false;
	link2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kprobe,
						SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* attach two kretprobes */
	opts.bpf_cookie = 0x10;
	opts.retprobe = true;
	retlink1 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						   SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(retlink1, "retlink1"))
		goto cleanup;

	opts.bpf_cookie = 0x20;
	opts.retprobe = true;
	retlink2 = bpf_program__attach_kprobe_opts(skel->progs.handle_kretprobe,
						   SYS_NANOSLEEP_KPROBE_NAME, &opts);
	if (!ASSERT_OK_PTR(retlink2, "retlink2"))
		goto cleanup;

	/* trigger kprobe && kretprobe */
	usleep(1);

	ASSERT_EQ(skel->bss->kprobe_res, 0x1 | 0x2, "kprobe_res");
	ASSERT_EQ(skel->bss->kretprobe_res, 0x10 | 0x20, "kretprobe_res");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(retlink1);
	bpf_link__destroy(retlink2);
}

static void uprobe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL;
	struct bpf_link *retlink1 = NULL, *retlink2 = NULL;
	ssize_t uprobe_offset;

	uprobe_offset = get_uprobe_offset(&trigger_func);
	if (!ASSERT_GE(uprobe_offset, 0, "uprobe_offset"))
		goto cleanup;

	/* attach two uprobes */
	opts.bpf_cookie = 0x100;
	opts.retprobe = false;
	link1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, 0 /* self pid */,
						"/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	opts.bpf_cookie = 0x200;
	opts.retprobe = false;
	link2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uprobe, -1 /* any pid */,
						"/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* attach two uretprobes */
	opts.bpf_cookie = 0x1000;
	opts.retprobe = true;
	retlink1 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, -1 /* any pid */,
						   "/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(retlink1, "retlink1"))
		goto cleanup;

	opts.bpf_cookie = 0x2000;
	opts.retprobe = true;
	retlink2 = bpf_program__attach_uprobe_opts(skel->progs.handle_uretprobe, 0 /* self pid */,
						   "/proc/self/exe", uprobe_offset, &opts);
	if (!ASSERT_OK_PTR(retlink2, "retlink2"))
		goto cleanup;

	/* trigger uprobe && uretprobe */
	trigger_func();

	ASSERT_EQ(skel->bss->uprobe_res, 0x100 | 0x200, "uprobe_res");
	ASSERT_EQ(skel->bss->uretprobe_res, 0x1000 | 0x2000, "uretprobe_res");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(retlink1);
	bpf_link__destroy(retlink2);
}

static void tp_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_tracepoint_opts, opts);
	struct bpf_link *link1 = NULL, *link2 = NULL, *link3 = NULL;

	/* attach first tp prog */
	opts.bpf_cookie = 0x10000;
	link1 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp1,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link1, "link1"))
		goto cleanup;

	/* attach second tp prog */
	opts.bpf_cookie = 0x20000;
	link2 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp2,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link2, "link2"))
		goto cleanup;

	/* trigger tracepoints */
	usleep(1);

	ASSERT_EQ(skel->bss->tp_res, 0x10000 | 0x20000, "tp_res1");

	/* now we detach first prog and will attach third one, which causes
	 * two internal calls to bpf_prog_array_copy(), shuffling
	 * bpf_prog_array_items around. We test here that we don't lose track
	 * of associated bpf_cookies.
	 */
	bpf_link__destroy(link1);
	link1 = NULL;
	kern_sync_rcu();
	skel->bss->tp_res = 0;

	/* attach third tp prog */
	opts.bpf_cookie = 0x40000;
	link3 = bpf_program__attach_tracepoint_opts(skel->progs.handle_tp3,
						    "syscalls", "sys_enter_nanosleep", &opts);
	if (!ASSERT_OK_PTR(link3, "link3"))
		goto cleanup;

	/* trigger tracepoints */
	usleep(1);

	ASSERT_EQ(skel->bss->tp_res, 0x20000 | 0x40000, "tp_res2");

cleanup:
	bpf_link__destroy(link1);
	bpf_link__destroy(link2);
	bpf_link__destroy(link3);
}

static void burn_cpu(void)
{
	volatile int j = 0;
	cpu_set_t cpu_set;
	int i, err;

	/* generate some branches on cpu 0 */
	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	ASSERT_OK(err, "set_thread_affinity");

	/* spin the loop for a while (random high number) */
	for (i = 0; i < 1000000; ++i)
		++j;
}

static void pe_subtest(struct test_bpf_cookie *skel)
{
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, opts);
	struct bpf_link *link = NULL;
	struct perf_event_attr attr;
	int pfd = -1;

	/* create perf event */
	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.freq = 1;
	attr.sample_freq = 4000;
	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
	if (!ASSERT_GE(pfd, 0, "perf_fd"))
		goto cleanup;

	opts.bpf_cookie = 0x100000;
	link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
	if (!ASSERT_OK_PTR(link, "link1"))
		goto cleanup;

	burn_cpu(); /* trigger BPF prog */
	ASSERT_EQ(skel->bss->pe_res, 0x100000, "pe_res1");

	/* prevent bpf_link__destroy() closing pfd itself */
	bpf_link__disconnect(link);
	/* close BPF link's FD explicitly */
	close(bpf_link__fd(link));
	/* free up memory used by struct bpf_link */
	bpf_link__destroy(link);
	link = NULL;
	kern_sync_rcu();
	skel->bss->pe_res = 0;

	opts.bpf_cookie = 0x200000;
	link = bpf_program__attach_perf_event_opts(skel->progs.handle_pe, pfd, &opts);
	if (!ASSERT_OK_PTR(link, "link2"))
		goto cleanup;

	burn_cpu(); /* trigger BPF prog */
	ASSERT_EQ(skel->bss->pe_res, 0x200000, "pe_res2");

cleanup:
	close(pfd);
	bpf_link__destroy(link);
}

void test_bpf_cookie(void)
{
	struct test_bpf_cookie *skel;

	skel = test_bpf_cookie__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	/* BPF progs use my_tid to filter events down to this thread */
	skel->bss->my_tid = syscall(SYS_gettid);

	if (test__start_subtest("kprobe"))
		kprobe_subtest(skel);
	if (test__start_subtest("uprobe"))
		uprobe_subtest(skel);
	if (test__start_subtest("tracepoint"))
		tp_subtest(skel);
	if (test__start_subtest("perf_event"))
		pe_subtest(skel);

	test_bpf_cookie__destroy(skel);
}