selftests/bpf: Test bpf_kptr_xchg stashing of bpf_rb_root

There was some confusion amongst Meta sched_ext folks regarding whether
stashing bpf_rb_root - the tree itself, rather than a single node - was
supported. This patch adds a small test which demonstrates this
functionality: a local kptr with rb_root is created, a node is created
and added to the tree, then the tree is kptr_xchg'd into a mapval.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/bpf/20231204211722.571346-1-davemarchevsky@fb.com
This commit is contained in:
Dave Marchevsky 2023-12-04 13:17:22 -08:00 committed by Daniel Borkmann
parent ce3c49da11
commit 1b4c7e20bf
2 changed files with 76 additions and 0 deletions

View File

@@ -48,6 +48,27 @@ static void test_local_kptr_stash_plain(void)
local_kptr_stash__destroy(skel);
}
static void test_local_kptr_stash_local_with_root(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
struct local_kptr_stash *skel;
int ret;
skel = local_kptr_stash__open_and_load();
if (!ASSERT_OK_PTR(skel, "local_kptr_stash__open_and_load"))
return;
ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.stash_local_with_root), &opts);
ASSERT_OK(ret, "local_kptr_stash_add_local_with_root run");
ASSERT_OK(opts.retval, "local_kptr_stash_add_local_with_root retval");
local_kptr_stash__destroy(skel);
}
static void test_local_kptr_stash_unstash(void)
{
LIBBPF_OPTS(bpf_test_run_opts, opts,
@@ -115,6 +136,8 @@ void test_local_kptr_stash(void)
test_local_kptr_stash_simple();
if (test__start_subtest("local_kptr_stash_plain"))
test_local_kptr_stash_plain();
if (test__start_subtest("local_kptr_stash_local_with_root"))
test_local_kptr_stash_local_with_root();
if (test__start_subtest("local_kptr_stash_unstash"))
test_local_kptr_stash_unstash();
if (test__start_subtest("refcount_acquire_without_unstash"))

View File

@@ -37,11 +37,18 @@ struct plain_local {
long data;
};
/* Local kptr type embedding an entire rbtree root (not just a node),
 * used to demonstrate that a bpf_rb_root can be stashed in a map value
 * via bpf_kptr_xchg.
 */
struct local_with_root {
	long key;
	/* lock protecting the tree; rbtree ops below are done under it */
	struct bpf_spin_lock l;
	/* tree of node_data, linked through node_data::node */
	struct bpf_rb_root r __contains(node_data, node);
};
/* Map value providing kptr stash slots; each __kptr field can hold
 * ownership of one object of the named type via bpf_kptr_xchg.
 */
struct map_value {
	struct prog_test_ref_kfunc *not_kptr;	/* plain pointer, not a kptr slot */
	struct prog_test_ref_kfunc __kptr *val;
	struct node_data __kptr *node;
	struct plain_local __kptr *plain;
	/* slot for a local kptr that itself contains a bpf_rb_root */
	struct local_with_root __kptr *local_root;
};
/* This is necessary so that LLVM generates BTF for node_data struct
@@ -65,6 +72,17 @@ struct {
__uint(max_entries, 2);
} some_nodes SEC(".maps");
/* rbtree comparator: order node_data entries by ascending key. */
static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
{
	struct node_data *lhs = container_of(a, struct node_data, node);
	struct node_data *rhs = container_of(b, struct node_data, node);

	return lhs->key < rhs->key;
}
static int create_and_stash(int idx, int val)
{
struct map_value *mapval;
@@ -113,6 +131,41 @@ long stash_plain(void *ctx)
return 0;
}
/* Demonstrate stashing a local kptr that contains a whole bpf_rb_root:
 * allocate a local_with_root, add one node to its embedded tree, then
 * bpf_kptr_xchg the entire object into mapval->local_root.
 * Returns 0 on success; a nonzero value identifies the failing step.
 */
SEC("tc")
long stash_local_with_root(void *ctx)
{
	struct local_with_root *res;
	struct map_value *mapval;
	struct node_data *n;
	int idx = 0;

	mapval = bpf_map_lookup_elem(&some_nodes, &idx);
	if (!mapval)
		return 1;

	res = bpf_obj_new(typeof(*res));
	if (!res)
		return 2;
	res->key = 41;

	n = bpf_obj_new(typeof(*n));
	if (!n) {
		/* res not stashed yet, so we still own it and must drop it */
		bpf_obj_drop(res);
		return 3;
	}

	/* rbtree mutation must happen under the root's spin_lock */
	bpf_spin_lock(&res->l);
	bpf_rbtree_add(&res->r, &n->node, less);
	bpf_spin_unlock(&res->l);

	/* xchg transfers ownership of res into the map; returns old slot value */
	res = bpf_kptr_xchg(&mapval->local_root, res);
	if (res) {
		/* slot was unexpectedly occupied; drop the displaced object */
		bpf_obj_drop(res);
		return 4;
	}

	return 0;
}
SEC("tc")
long unstash_rb_node(void *ctx)
{