selftests/bpf: Test racing between bpf_timer_cancel_and_free and bpf_timer_cancel

This selftest is based on Alexei's test, adapted from an internal
user's report while troubleshooting another bug. During this exercise,
a separate racing bug was discovered between bpf_timer_cancel_and_free
and bpf_timer_cancel. The details can be found in the previous
patch.

This patch adds a selftest that can trigger the bug.
I can trigger the UAF every time in my qemu setup with KASAN. The idea
is to have multiple user space threads running in a tight loop to exercise
both bpf_map_update_elem (which calls into bpf_timer_cancel_and_free)
and bpf_timer_cancel.

Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/bpf/20240215211218.990808-2-martin.lau@linux.dev
This commit is contained in:
Martin KaFai Lau 2024-02-15 13:12:18 -08:00 committed by Daniel Borkmann
parent 0281b919e1
commit 3f00e4a9c9
2 changed files with 67 additions and 2 deletions

View file

@ -4,10 +4,29 @@
#include "timer.skel.h"
#include "timer_failure.skel.h"
#define NUM_THR 8
/* Worker thread body: repeatedly runs the prog whose fd is passed via
 * *arg through bpf_prog_test_run_opts(), so several of these threads
 * hammer the same program concurrently.  Exits early if a run fails.
 * Returns arg (the prog_fd pointer) so the joiner can sanity-check it.
 */
static void *spin_lock_thread(void *arg)
{
	int prog_fd = *(int *)arg;
	int iter, ret;
	LIBBPF_OPTS(bpf_test_run_opts, topts);

	for (iter = 0; iter < 10000; iter++) {
		ret = bpf_prog_test_run_opts(prog_fd, &topts);
		if (!ASSERT_OK(ret, "test_run_opts err"))
			break;
		if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
			break;
	}

	pthread_exit(arg);
}
static int timer(struct timer *timer_skel)
{
int err, prog_fd;
int i, err, prog_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
pthread_t thread_id[NUM_THR];
void *ret;
err = timer__attach(timer_skel);
if (!ASSERT_OK(err, "timer_attach"))
@ -43,6 +62,20 @@ static int timer(struct timer *timer_skel)
/* check that code paths completed */
ASSERT_EQ(timer_skel->bss->ok, 1 | 2 | 4, "ok");
prog_fd = bpf_program__fd(timer_skel->progs.race);
for (i = 0; i < NUM_THR; i++) {
err = pthread_create(&thread_id[i], NULL,
&spin_lock_thread, &prog_fd);
if (!ASSERT_OK(err, "pthread_create"))
break;
}
while (i) {
err = pthread_join(thread_id[--i], &ret);
if (ASSERT_OK(err, "pthread_join"))
ASSERT_EQ(ret, (void *)&prog_fd, "pthread_join");
}
return 0;
}

View file

@ -51,7 +51,8 @@ struct {
__uint(max_entries, 1);
__type(key, int);
__type(value, struct elem);
} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps");
} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps"),
race_array SEC(".maps");
__u64 bss_data;
__u64 abs_data;
@ -390,3 +391,34 @@ int BPF_PROG2(test5, int, a)
return 0;
}
/* Timer callback: immediately re-arms the same timer (1000000 ns delay)
 * so a timer is pending as often as possible, widening the window for
 * the bpf_timer_cancel vs. bpf_timer_cancel_and_free race this test
 * is designed to trigger.
 */
static int race_timer_callback(void *race_array, int *race_key, struct bpf_timer *timer)
{
	bpf_timer_start(timer, 1000000, 0);
	return 0;
}
/* Run concurrently from many user-space threads (see spin_lock_thread).
 * Each invocation replaces the map element — which makes the kernel free
 * the previous element's timer via bpf_timer_cancel_and_free — and then
 * re-initializes, arms, and cancels the timer, racing against the other
 * threads doing the same on the same key.
 *
 * Returns 0 on a completed pass, 1 if lookup/init fails (tolerated by
 * the caller: the verdict comes from KASAN, not the retval).
 */
SEC("syscall")
int race(void *ctx)
{
	struct bpf_timer *timer;
	int err, race_key = 0;
	struct elem init;

	__builtin_memset(&init, 0, sizeof(struct elem));
	/* Overwriting the element triggers bpf_timer_cancel_and_free()
	 * on the old element's timer inside the kernel.
	 */
	bpf_map_update_elem(&race_array, &race_key, &init, BPF_ANY);

	timer = bpf_map_lookup_elem(&race_array, &race_key);
	if (!timer)
		return 1;

	/* -EBUSY means another thread already initialized this timer
	 * between our update and init — that is expected under the race.
	 */
	err = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
	if (err && err != -EBUSY)
		return 1;

	/* Return values are deliberately ignored: a concurrent
	 * bpf_map_update_elem() may free/replace this timer at any
	 * point, and failures here are part of the exercised race.
	 */
	bpf_timer_set_callback(timer, race_timer_callback);
	bpf_timer_start(timer, 0, 0);
	bpf_timer_cancel(timer);

	return 0;
}