linux-stable/tools/testing/selftests/bpf/prog_tests/rbtree.c
Dave Marchevsky 404ad75a36 bpf: Migrate bpf_rbtree_remove to possibly fail
This patch modifies bpf_rbtree_remove to account for possible failure
due to the input rb_node not being in any collection. The function can
now return NULL, and does so when that scenario occurs. As before, on
successful removal an owning reference to the removed node is returned.
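
For reference, the kfunc's signature is unchanged - only the contract on
its return value is. It is declared roughly as follows in the selftests'
bpf_experimental.h:

  extern struct bpf_rb_node *bpf_rbtree_remove(struct bpf_rb_root *root,
                                               struct bpf_rb_node *node) __ksym;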

Adding KF_RET_NULL to bpf_rbtree_remove's kfunc flags - now KF_RET_NULL |
KF_ACQUIRE - provides the desired verifier semantics:

  * retval must be checked for NULL before use
  * if NULL, retval's ref_obj_id is released
  * retval is a "maybe acquired" owning ref, not a non-owning ref,
    so it will live past end of critical section (bpf_spin_unlock), and
    thus can be checked for NULL after the end of the CS
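
In the kernel's kfunc BTF ID table (kernel/bpf/helpers.c; exact location
may differ across kernel versions), the flag change amounts to roughly:

  BTF_ID_FLAGS(func, bpf_rbtree_remove, KF_ACQUIRE | KF_RET_NULL)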

BPF programs must add checks
============================

This does change bpf_rbtree_remove's verifier behavior. BPF program
writers will need to add NULL checks to their programs, but the
resulting UX looks natural:

  bpf_spin_lock(&glock);

  n = bpf_rbtree_first(&ghead);
  if (!n) { /* ... */ }
  res = bpf_rbtree_remove(&ghead, &n->node);

  bpf_spin_unlock(&glock);

  if (!res)  /* Newly-added check after this patch */
    return 1;

  n = container_of(res, /* ... */);
  /* Do something else with n */
  bpf_obj_drop(n);
  return 0;

The "if (!res)" check above is the only addition necessary for the above
program to pass verification after this patch.
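
For completeness, here is a self-contained sketch of the same pattern,
modeled on the conventions used by selftests/bpf/progs/rbtree.c. The
program name, lock/root names, and the guarded container_of fallback are
illustrative assumptions, not taken from the patch:

  #include <vmlinux.h>
  #include <bpf/bpf_helpers.h>
  #include "bpf_experimental.h"

  /* fallback in case the shared selftest headers don't provide it */
  #ifndef container_of
  #define container_of(ptr, type, member) \
    ((type *)((void *)(ptr) - offsetof(type, member)))
  #endif

  struct node_data {
    long key;
    struct bpf_rb_node node;
  };

  #define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
  private(A) struct bpf_spin_lock glock;
  private(A) struct bpf_rb_root ghead __contains(node_data, node);

  SEC("tc")
  long remove_first(void *ctx)
  {
    struct bpf_rb_node *res;
    struct node_data *n;

    bpf_spin_lock(&glock);

    res = bpf_rbtree_first(&ghead);
    if (!res) {
      bpf_spin_unlock(&glock);
      return 1;
    }
    n = container_of(res, struct node_data, node);
    res = bpf_rbtree_remove(&ghead, &n->node);

    bpf_spin_unlock(&glock);

    if (!res) /* the newly-required NULL check */
      return 1;

    n = container_of(res, struct node_data, node);
    bpf_obj_drop(n);
    return 0;
  }

  char _license[] SEC("license") = "GPL";

Note that the NULL check on the remove's retval happens after
bpf_spin_unlock: this is exactly the "maybe acquired owning ref lives
past the end of the CS" property described above.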

bpf_rbtree_remove no longer clobbers non-owning refs
====================================================

An issue arises when bpf_rbtree_remove fails, though. Consider this
example:

  struct node_data {
    long key;
    struct bpf_list_node l;
    struct bpf_rb_node r;
    struct bpf_refcount ref;
  };

  long failed_sum;

  void bpf_prog()
  {
    struct node_data *n = bpf_obj_new(/* ... */);
    struct bpf_rb_node *res;
    n->key = 10;

    bpf_spin_lock(&glock);

    bpf_list_push_back(&some_list, &n->l); /* n is now a non-owning ref */
    res = bpf_rbtree_remove(&some_tree, &n->r, /* ... */);
    if (!res)
      failed_sum += n->key;  /* rejected by the verifier before this patch */

    bpf_spin_unlock(&glock);
    /* if (res) { do something useful and drop } ... */
  }

The bpf_rbtree_remove in this example will always fail at runtime: n's
rb_node was never added to some_tree, so the node is not in any tree.
Before this patch, bpf_rbtree_remove was - like bpf_spin_unlock - a
non-owning reference invalidation point: the verifier clobbered all
non-owning refs after a bpf_rbtree_remove call. The "failed_sum +=
n->key" line would therefore fail verification, and in fact there was no
good way to get information about the node the remove failed on after
the invalidation. This patch removes non-owning reference invalidation
from bpf_rbtree_remove to allow the above usecase to pass verification.
The logic for why this is now possible is as follows:

Before this series, bpf_rbtree_remove couldn't fail and thus assumed
that its input, a non-owning reference, was in the tree. But it's easy
to construct an example where two non-owning references pointing to the
same underlying memory are acquired and passed to bpf_rbtree_remove one
after another, so that the second remove operates on a node that's no
longer in the tree (see rbtree_api_release_aliasing in
selftests/bpf/progs/rbtree_fail.c).
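
Condensed, the aliasing pattern looks like this (a simplified sketch,
not the verbatim test; m and o end up pointing at the same node, which
the verifier cannot know statically):

  bpf_spin_lock(&glock);

  res = bpf_rbtree_first(&ghead);
  if (!res) { /* unlock and bail */ }
  m = container_of(res, struct node_data, node);

  res = bpf_rbtree_first(&ghead);
  if (!res) { /* unlock and bail */ }
  o = container_of(res, struct node_data, node); /* aliases m */

  res  = bpf_rbtree_remove(&ghead, &m->node); /* succeeds: owning ref */
  res2 = bpf_rbtree_remove(&ghead, &o->node); /* node already removed: NULL */

  bpf_spin_unlock(&glock);

Before this patch the verifier rejected the second remove by clobbering
o at the first one; now the call is allowed and simply returns NULL at
runtime.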

So it was necessary to clobber non-owning refs to prevent this case
and, more generally, to enforce the "non-owning ref is definitely in
some collection" invariant. This series removes that invariant, and the
failure / runtime checking added in this patch provides a clean way to
deal with the aliasing issue: just fail to remove.

Because clobbering non-owning refs is no longer needed to prevent the
aliasing issue, this patch removes the invalidate_non_owning_refs call
from the verifier's handling of bpf_rbtree_remove. Note that
bpf_spin_unlock - the other caller of invalidate_non_owning_refs -
clobbers non-owning refs for a different reason (they must not outlive
the critical section), so its clobbering behavior remains unchanged.
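
By contrast with the remove case, a non-owning ref still must not
outlive its critical section, so the unlock-time clobbering stays:

  bpf_spin_lock(&glock);
  res = bpf_rbtree_first(&ghead); /* non-owning ref, valid only in the CS */
  bpf_spin_unlock(&glock);        /* invalidation point: res is clobbered */
  /* any use of res here fails verification */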

No BPF program changes are necessary for existing programs to remain
valid as a result of this clobbering change: any program that passed
verification before this patch did so with its non-owning refs having
shorter (or equal) lifetimes, due to the more aggressive clobbering.

Also, update existing tests to check bpf_rbtree_remove retval for NULL
where necessary, and move rbtree_api_release_aliasing from
progs/rbtree_fail.c to progs/rbtree.c since it's now expected to pass
verification.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Link: https://lore.kernel.org/r/20230415201811.343116-8-davemarchevsky@fb.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2023-04-15 17:36:50 -07:00

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#include <test_progs.h>
#include <network_helpers.h>

#include "rbtree.skel.h"
#include "rbtree_fail.skel.h"
#include "rbtree_btf_fail__wrong_node_type.skel.h"
#include "rbtree_btf_fail__add_wrong_type.skel.h"

static void test_rbtree_add_nodes(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes), &opts);
	ASSERT_OK(ret, "rbtree_add_nodes run");
	ASSERT_OK(opts.retval, "rbtree_add_nodes retval");
	ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes less_callback_ran");

	rbtree__destroy(skel);
}

static void test_rbtree_add_and_remove(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove), &opts);
	ASSERT_OK(ret, "rbtree_add_and_remove");
	ASSERT_OK(opts.retval, "rbtree_add_and_remove retval");
	ASSERT_EQ(skel->data->removed_key, 5, "rbtree_add_and_remove first removed key");

	rbtree__destroy(skel);
}

static void test_rbtree_first_and_remove(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_first_and_remove), &opts);
	ASSERT_OK(ret, "rbtree_first_and_remove");
	ASSERT_OK(opts.retval, "rbtree_first_and_remove retval");
	ASSERT_EQ(skel->data->first_data[0], 2, "rbtree_first_and_remove first rbtree_first()");
	ASSERT_EQ(skel->data->removed_key, 1, "rbtree_first_and_remove first removed key");
	ASSERT_EQ(skel->data->first_data[1], 4, "rbtree_first_and_remove second rbtree_first()");

	rbtree__destroy(skel);
}

static void test_rbtree_api_release_aliasing(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);
	struct rbtree *skel;
	int ret;

	skel = rbtree__open_and_load();
	if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load"))
		return;

	ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_api_release_aliasing), &opts);
	ASSERT_OK(ret, "rbtree_api_release_aliasing");
	ASSERT_OK(opts.retval, "rbtree_api_release_aliasing retval");
	ASSERT_EQ(skel->data->first_data[0], 42, "rbtree_api_release_aliasing first rbtree_remove()");
	ASSERT_EQ(skel->data->first_data[1], -1, "rbtree_api_release_aliasing second rbtree_remove()");

	rbtree__destroy(skel);
}

void test_rbtree_success(void)
{
	if (test__start_subtest("rbtree_add_nodes"))
		test_rbtree_add_nodes();
	if (test__start_subtest("rbtree_add_and_remove"))
		test_rbtree_add_and_remove();
	if (test__start_subtest("rbtree_first_and_remove"))
		test_rbtree_first_and_remove();
	if (test__start_subtest("rbtree_api_release_aliasing"))
		test_rbtree_api_release_aliasing();
}

#define BTF_FAIL_TEST(suffix)							\
void test_rbtree_btf_fail__##suffix(void)					\
{										\
	struct rbtree_btf_fail__##suffix *skel;					\
										\
	skel = rbtree_btf_fail__##suffix##__open_and_load();			\
	if (!ASSERT_ERR_PTR(skel,						\
			    "rbtree_btf_fail__" #suffix "__open_and_load unexpected success")) \
		rbtree_btf_fail__##suffix##__destroy(skel);			\
}

#define RUN_BTF_FAIL_TEST(suffix)				\
	if (test__start_subtest("rbtree_btf_fail__" #suffix))	\
		test_rbtree_btf_fail__##suffix();

BTF_FAIL_TEST(wrong_node_type);
BTF_FAIL_TEST(add_wrong_type);

void test_rbtree_btf_fail(void)
{
	RUN_BTF_FAIL_TEST(wrong_node_type);
	RUN_BTF_FAIL_TEST(add_wrong_type);
}

void test_rbtree_fail(void)
{
	RUN_TESTS(rbtree_fail);
}