Daniel Borkmann says:

====================
pull-request: bpf 2022-06-17

We've added 12 non-merge commits during the last 4 day(s) which contain
a total of 14 files changed, 305 insertions(+), 107 deletions(-).

The main changes are:

1) Fix x86 JIT tailcall count offset on BPF-2-BPF call, from Jakub Sitnicki.

2) Fix a kprobe_multi link bug which misplaces BPF cookies, from Jiri Olsa.

3) Fix an infinite loop when processing a module's BTF, from Kumar Kartikeya Dwivedi.

4) Fix rethook acquisition so that a rethook is only taken when RCU is watching (RCU-available context), from Masami Hiramatsu.

5) Fix request socket refcount leak in sk lookup helpers, from Jon Maxwell.

6) Fix xsk generic transmit which could wrongly add an skb to an already full completion queue (cq), from Ciara Loftus.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf:
  rethook: Reject getting a rethook if RCU is not watching
  fprobe, samples: Add use_trace option and show hit/missed counter
  bpf, docs: Update some of the JIT/maintenance entries
  selftest/bpf: Fix kprobe_multi bench test
  bpf: Force cookies array to follow symbols sorting
  ftrace: Keep address offset in ftrace_lookup_symbols
  selftests/bpf: Shuffle cookies symbols in kprobe multi test
  selftests/bpf: Test tail call counting with bpf2bpf and data on stack
  bpf, x86: Fix tail call count offset calculation on bpf2bpf call
  bpf: Limit maximum modifier chain length in btf_check_type_tags
  bpf: Fix request_sock leak in sk lookup helpers
  xsk: Fix generic transmit when completion queue reservation fails
====================

Link: https://lore.kernel.org/r/20220617202119.2421-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

@@ -3662,7 +3662,7 @@ BPF JIT for ARM
M: Shubham Bansal <illusionist.neo@gmail.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
S: Odd Fixes
F: arch/arm/net/
BPF JIT for ARM64
@@ -3686,14 +3686,15 @@ BPF JIT for NFP NICs
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Supported
S: Odd Fixes
F: drivers/net/ethernet/netronome/nfp/bpf/
BPF JIT for POWERPC (32-BIT AND 64-BIT)
M: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
M: Michael Ellerman <mpe@ellerman.id.au>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
S: Supported
F: arch/powerpc/net/
BPF JIT for RISC-V (32-bit)
@@ -3719,7 +3720,7 @@ M: Heiko Carstens <hca@linux.ibm.com>
M: Vasily Gorbik <gor@linux.ibm.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
S: Supported
F: arch/s390/net/
X: arch/s390/net/pnet.c
@@ -3727,14 +3728,14 @@ BPF JIT for SPARC (32-BIT AND 64-BIT)
M: David S. Miller <davem@davemloft.net>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
S: Odd Fixes
F: arch/sparc/net/
BPF JIT for X86 32-BIT
M: Wang YanQing <udknight@gmail.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
S: Odd Fixes
F: arch/x86/net/bpf_jit_comp32.c
BPF JIT for X86 64-BIT
@@ -3757,6 +3758,19 @@ F: include/linux/bpf_lsm.h
F: kernel/bpf/bpf_lsm.c
F: security/bpf/
BPF L7 FRAMEWORK
M: John Fastabend <john.fastabend@gmail.com>
M: Jakub Sitnicki <jakub@cloudflare.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
F: include/linux/skmsg.h
F: net/core/skmsg.c
F: net/core/sock_map.c
F: net/ipv4/tcp_bpf.c
F: net/ipv4/udp_bpf.c
F: net/unix/unix_bpf.c
BPFTOOL
M: Quentin Monnet <quentin@isovalent.com>
L: bpf@vger.kernel.org
@@ -11096,20 +11110,6 @@ S: Maintained
F: include/net/l3mdev.h
F: net/l3mdev
L7 BPF FRAMEWORK
M: John Fastabend <john.fastabend@gmail.com>
M: Daniel Borkmann <daniel@iogearbox.net>
M: Jakub Sitnicki <jakub@cloudflare.com>
L: netdev@vger.kernel.org
L: bpf@vger.kernel.org
S: Maintained
F: include/linux/skmsg.h
F: net/core/skmsg.c
F: net/core/sock_map.c
F: net/ipv4/tcp_bpf.c
F: net/ipv4/udp_bpf.c
F: net/unix/unix_bpf.c
LANDLOCK SECURITY MODULE
M: Mickaël Salaün <mic@digikod.net>
L: linux-security-module@vger.kernel.org
@@ -13952,7 +13952,6 @@ F: net/ipv6/tcp*.c
NETWORKING [TLS]
M: Boris Pismenny <borisp@nvidia.com>
M: John Fastabend <john.fastabend@gmail.com>
M: Daniel Borkmann <daniel@iogearbox.net>
M: Jakub Kicinski <kuba@kernel.org>
L: netdev@vger.kernel.org
S: Maintained


@@ -1420,8 +1420,9 @@ st: if (is_imm8(insn->off))
case BPF_JMP | BPF_CALL:
func = (u8 *) __bpf_call_base + imm32;
if (tail_call_reachable) {
/* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
EMIT3_off32(0x48, 0x8B, 0x85,
-(bpf_prog->aux->stack_depth + 8));
-round_up(bpf_prog->aux->stack_depth, 8) - 8);
if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
return -EINVAL;
} else {
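
The offset being fixed is where the x86 JIT keeps the tail call count in the caller's frame: the JIT rounds the program's stack depth up to an 8-byte multiple when it builds the frame, so the reload must use the rounded depth too. Below is a minimal user-space sketch (not kernel code; round_up() is re-implemented here purely for illustration) showing how the old, unrounded offset points at the wrong slot whenever the stack depth is not a multiple of 8:

#include <stdio.h>

/* same semantics as the kernel's round_up() for power-of-two alignment */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
        /* e.g. a subprogram with a single char on the stack has depth 1 */
        for (unsigned int depth = 1; depth <= 16; depth++) {
                int old_off = -(int)(depth + 8);            /* before the fix */
                int new_off = -(int)round_up(depth, 8) - 8; /* rbp - rounded depth - 8 */

                printf("stack_depth %2u: old %4d, fixed %4d%s\n",
                       depth, old_off, new_off,
                       old_off != new_off ? "  <- count read from wrong slot" : "");
        }
        return 0;
}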


@@ -4815,6 +4815,7 @@ static int btf_check_type_tags(struct btf_verifier_env *env,
n = btf_nr_types(btf);
for (i = start_id; i < n; i++) {
const struct btf_type *t;
int chain_limit = 32;
u32 cur_id = i;
t = btf_type_by_id(btf, i);
@@ -4827,6 +4828,10 @@ static int btf_check_type_tags(struct btf_verifier_env *env,
in_tags = btf_type_is_type_tag(t);
while (btf_type_is_modifier(t)) {
if (!chain_limit--) {
btf_verifier_log(env, "Max chain length or cycle detected");
return -ELOOP;
}
if (btf_type_is_type_tag(t)) {
if (!in_tags) {
btf_verifier_log(env, "Type tags don't precede modifiers");


@@ -2423,7 +2423,7 @@ kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip,
kprobe_multi_link_prog_run(link, entry_ip, regs);
}
static int symbols_cmp(const void *a, const void *b)
static int symbols_cmp_r(const void *a, const void *b, const void *priv)
{
const char **str_a = (const char **) a;
const char **str_b = (const char **) b;
@@ -2431,6 +2431,28 @@ static int symbols_cmp(const void *a, const void *b)
return strcmp(*str_a, *str_b);
}
struct multi_symbols_sort {
const char **funcs;
u64 *cookies;
};
static void symbols_swap_r(void *a, void *b, int size, const void *priv)
{
const struct multi_symbols_sort *data = priv;
const char **name_a = a, **name_b = b;
swap(*name_a, *name_b);
/* If defined, swap also related cookies. */
if (data->cookies) {
u64 *cookie_a, *cookie_b;
cookie_a = data->cookies + (name_a - data->funcs);
cookie_b = data->cookies + (name_b - data->funcs);
swap(*cookie_a, *cookie_b);
}
}
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
struct bpf_kprobe_multi_link *link = NULL;
@@ -2468,25 +2490,6 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
if (!addrs)
return -ENOMEM;
if (uaddrs) {
if (copy_from_user(addrs, uaddrs, size)) {
err = -EFAULT;
goto error;
}
} else {
struct user_syms us;
err = copy_user_syms(&us, usyms, cnt);
if (err)
goto error;
sort(us.syms, cnt, sizeof(*us.syms), symbols_cmp, NULL);
err = ftrace_lookup_symbols(us.syms, cnt, addrs);
free_user_syms(&us);
if (err)
goto error;
}
ucookies = u64_to_user_ptr(attr->link_create.kprobe_multi.cookies);
if (ucookies) {
cookies = kvmalloc_array(cnt, sizeof(*addrs), GFP_KERNEL);
@@ -2500,6 +2503,33 @@ int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *pr
}
}
if (uaddrs) {
if (copy_from_user(addrs, uaddrs, size)) {
err = -EFAULT;
goto error;
}
} else {
struct multi_symbols_sort data = {
.cookies = cookies,
};
struct user_syms us;
err = copy_user_syms(&us, usyms, cnt);
if (err)
goto error;
if (cookies)
data.funcs = us.syms;
sort_r(us.syms, cnt, sizeof(*us.syms), symbols_cmp_r,
symbols_swap_r, &data);
err = ftrace_lookup_symbols(us.syms, cnt, addrs);
free_user_syms(&us);
if (err)
goto error;
}
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link) {
err = -ENOMEM;
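
The cookies supplied by user space are matched to symbols purely by array index, so once the symbol array is sorted (for the later bsearch in kallsyms_callback()), the cookie array must be permuted in lockstep; that is what sort_r() with the symbols_swap_r() callback does. A small user-space sketch of the same idea (a plain insertion sort stands in for the kernel's sort_r(); the symbol names and cookie values are made up):

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        /* as supplied by the user: cookies[i] belongs to syms[i] */
        const char *syms[] = { "bpf_fentry_test3", "bpf_fentry_test1", "bpf_fentry_test2" };
        unsigned long long cookies[] = { 3, 1, 2 };

        /* whenever two symbol names are swapped, swap the matching cookies
         * as well, so cookies[i] still describes syms[i] after sorting */
        for (size_t i = 1; i < ARRAY_SIZE(syms); i++) {
                for (size_t j = i; j > 0 && strcmp(syms[j - 1], syms[j]) > 0; j--) {
                        const char *tn = syms[j - 1];
                        unsigned long long tc = cookies[j - 1];

                        syms[j - 1] = syms[j];
                        syms[j] = tn;
                        cookies[j - 1] = cookies[j];
                        cookies[j] = tc;
                }
        }

        for (size_t i = 0; i < ARRAY_SIZE(syms); i++)
                printf("%s -> cookie %llu\n", syms[i], cookies[i]);
        return 0;
}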


@@ -8029,15 +8029,23 @@ static int kallsyms_callback(void *data, const char *name,
struct module *mod, unsigned long addr)
{
struct kallsyms_data *args = data;
const char **sym;
int idx;
if (!bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp))
sym = bsearch(&name, args->syms, args->cnt, sizeof(*args->syms), symbols_cmp);
if (!sym)
return 0;
idx = sym - args->syms;
if (args->addrs[idx])
return 0;
addr = ftrace_location(addr);
if (!addr)
return 0;
args->addrs[args->found++] = addr;
args->addrs[idx] = addr;
args->found++;
return args->found == args->cnt ? 1 : 0;
}
@@ -8062,6 +8070,7 @@ int ftrace_lookup_symbols(const char **sorted_syms, size_t cnt, unsigned long *a
struct kallsyms_data args;
int err;
memset(addrs, 0, sizeof(*addrs) * cnt);
args.addrs = addrs;
args.syms = sorted_syms;
args.cnt = cnt;
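
kallsyms reports matches in whatever order it walks the symbol table, so the callback can no longer just append addresses; it derives the position of the matched name from the bsearch() result and stores the address at that same index, keeping addrs[i] paired with sorted_syms[i] (and ignoring duplicate hits). A hypothetical user-space sketch of that index computation (the symbol names and the address are made up):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int symbols_cmp(const void *a, const void *b)
{
        const char * const *sa = a, * const *sb = b;

        return strcmp(*sa, *sb);
}

int main(void)
{
        /* sorted symbol array, as ftrace_lookup_symbols() expects */
        const char *syms[] = { "bpf_fentry_test1", "bpf_fentry_test2", "bpf_fentry_test3" };
        unsigned long addrs[3] = { 0 };

        /* pretend kallsyms reported this symbol first, out of order */
        const char *name = "bpf_fentry_test3";
        unsigned long addr = 0xffffffffc0001230UL;

        const char **sym = bsearch(&name, syms, 3, sizeof(*syms), symbols_cmp);
        if (sym) {
                size_t idx = sym - syms;        /* keep addrs[] aligned with syms[] */

                if (!addrs[idx])
                        addrs[idx] = addr;
                printf("addrs[%zu] = %#lx for %s\n", idx, addrs[idx], syms[idx]);
        }
        return 0;
}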


@@ -154,6 +154,15 @@ struct rethook_node *rethook_try_get(struct rethook *rh)
if (unlikely(!handler))
return NULL;
/*
* This expects the caller will set up a rethook on a function entry.
* When the function returns, the rethook will eventually be reclaimed
* or released in the rethook_recycle() with call_rcu().
* This means the caller must run in an RCU-available context.
*/
if (unlikely(!rcu_is_watching()))
return NULL;
fn = freelist_try_get(&rh->pool);
if (!fn)
return NULL;


@@ -6516,10 +6516,21 @@ __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
ifindex, proto, netns_id, flags);
if (sk) {
sk = sk_to_full_sk(sk);
if (!sk_fullsock(sk)) {
struct sock *sk2 = sk_to_full_sk(sk);
/* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
* sock refcnt is decremented to prevent a request_sock leak.
*/
if (!sk_fullsock(sk2))
sk2 = NULL;
if (sk2 != sk) {
sock_gen_put(sk);
return NULL;
/* Ensure there is no need to bump sk2 refcnt */
if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
return NULL;
}
sk = sk2;
}
}
@@ -6553,10 +6564,21 @@ bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
flags);
if (sk) {
sk = sk_to_full_sk(sk);
if (!sk_fullsock(sk)) {
struct sock *sk2 = sk_to_full_sk(sk);
/* sk_to_full_sk() may return (sk)->rsk_listener, so make sure the original sk
* sock refcnt is decremented to prevent a request_sock leak.
*/
if (!sk_fullsock(sk2))
sk2 = NULL;
if (sk2 != sk) {
sock_gen_put(sk);
return NULL;
/* Ensure there is no need to bump sk2 refcnt */
if (unlikely(sk2 && !sock_flag(sk2, SOCK_RCU_FREE))) {
WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
return NULL;
}
sk = sk2;
}
}
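
The leak happened because the lookup takes a reference on whatever socket it finds; when that is a request_sock, sk_to_full_sk() hands back the listener instead, and the reference held on the original request_sock still has to be dropped. A toy user-space model of that pattern (struct obj, to_full() and the refcount fields are hypothetical stand-ins for the socket types and sock_gen_put()):

#include <stdio.h>

struct obj {
        int refcnt;
        struct obj *parent;     /* stands in for rsk_listener */
};

static struct obj *to_full(struct obj *o)
{
        return o->parent ? o->parent : o;       /* like sk_to_full_sk() */
}

int main(void)
{
        struct obj listener = { .refcnt = 1 };
        struct obj req = { .refcnt = 1, .parent = &listener }; /* lookup took a ref */
        struct obj *sk = &req;
        struct obj *sk2 = to_full(sk);

        if (sk2 != sk)
                req.refcnt--;   /* like sock_gen_put(sk); skipping this is the leak */

        printf("request_sock refcnt after lookup path: %d\n", req.refcnt);
        return 0;
}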


@@ -538,12 +538,6 @@ static int xsk_generic_xmit(struct sock *sk)
goto out;
}
skb = xsk_build_skb(xs, &desc);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
goto out;
}
/* This is the backpressure mechanism for the Tx path.
* Reserve space in the completion queue and only proceed
* if there is space in it. This avoids having to implement
@@ -552,11 +546,19 @@ static int xsk_generic_xmit(struct sock *sk)
spin_lock_irqsave(&xs->pool->cq_lock, flags);
if (xskq_prod_reserve(xs->pool->cq)) {
spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
kfree_skb(skb);
goto out;
}
spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
skb = xsk_build_skb(xs, &desc);
if (IS_ERR(skb)) {
err = PTR_ERR(skb);
spin_lock_irqsave(&xs->pool->cq_lock, flags);
xskq_prod_cancel(xs->pool->cq);
spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
goto out;
}
err = __dev_direct_xmit(skb, xs->queue_id);
if (err == NETDEV_TX_BUSY) {
/* Tell user-space to retry the send */
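
The reordering above is the fix: a completion queue slot is reserved before the skb is built, so a full queue is detected up front, and a failed skb build simply cancels the reservation (the old order built the skb first and, per change 6) above, could wrongly add it to an already full cq). A toy user-space sketch of that reserve-then-build-then-cancel-on-error ordering (all names below are hypothetical stand-ins for the xsk queue helpers):

#include <stdbool.h>
#include <stdio.h>

struct cq {
        unsigned int cap, reserved;
};

static bool cq_reserve(struct cq *q)            /* like xskq_prod_reserve() */
{
        if (q->reserved == q->cap)
                return false;                   /* queue full: backpressure */
        q->reserved++;
        return true;
}

static void cq_cancel(struct cq *q)             /* like xskq_prod_cancel() */
{
        q->reserved--;
}

static void *build_skb(bool fail)               /* stand-in for xsk_build_skb() */
{
        static int dummy;

        return fail ? NULL : &dummy;
}

int main(void)
{
        struct cq q = { .cap = 1 };

        if (!cq_reserve(&q))
                return 0;                       /* nothing sent, no slot consumed */

        if (!build_skb(true)) {                 /* pretend the allocation fails */
                cq_cancel(&q);                  /* give the reserved slot back */
                printf("build failed, reservation cancelled (reserved=%u)\n", q.reserved);
                return 1;
        }
        /* ... transmit the skb and later complete the reserved cq entry ... */
        return 0;
}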


@@ -21,6 +21,7 @@
#define BACKTRACE_DEPTH 16
#define MAX_SYMBOL_LEN 4096
struct fprobe sample_probe;
static unsigned long nhit;
static char symbol[MAX_SYMBOL_LEN] = "kernel_clone";
module_param_string(symbol, symbol, sizeof(symbol), 0644);
@@ -28,6 +29,8 @@ static char nosymbol[MAX_SYMBOL_LEN] = "";
module_param_string(nosymbol, nosymbol, sizeof(nosymbol), 0644);
static bool stackdump = true;
module_param(stackdump, bool, 0644);
static bool use_trace = false;
module_param(use_trace, bool, 0644);
static void show_backtrace(void)
{
@@ -40,7 +43,15 @@ static void show_backtrace(void)
static void sample_entry_handler(struct fprobe *fp, unsigned long ip, struct pt_regs *regs)
{
pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
if (use_trace)
/*
* This is just an example, no kernel code should call
* trace_printk() except when actively debugging.
*/
trace_printk("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
else
pr_info("Enter <%pS> ip = 0x%p\n", (void *)ip, (void *)ip);
nhit++;
if (stackdump)
show_backtrace();
}
@@ -49,8 +60,17 @@ static void sample_exit_handler(struct fprobe *fp, unsigned long ip, struct pt_r
{
unsigned long rip = instruction_pointer(regs);
pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
(void *)ip, (void *)ip, (void *)rip, (void *)rip);
if (use_trace)
/*
* This is just an example, no kernel code should call
* trace_printk() except when actively debugging.
*/
trace_printk("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
(void *)ip, (void *)ip, (void *)rip, (void *)rip);
else
pr_info("Return from <%pS> ip = 0x%p to rip = 0x%p (%pS)\n",
(void *)ip, (void *)ip, (void *)rip, (void *)rip);
nhit++;
if (stackdump)
show_backtrace();
}
@@ -112,7 +132,8 @@ static void __exit fprobe_exit(void)
{
unregister_fprobe(&sample_probe);
pr_info("fprobe at %s unregistered\n", symbol);
pr_info("fprobe at %s unregistered. %ld times hit, %ld times missed\n",
symbol, nhit, sample_probe.nmissed);
}
module_init(fprobe_init)


@@ -121,24 +121,24 @@ static void kprobe_multi_link_api_subtest(void)
})
GET_ADDR("bpf_fentry_test1", addrs[0]);
GET_ADDR("bpf_fentry_test2", addrs[1]);
GET_ADDR("bpf_fentry_test3", addrs[2]);
GET_ADDR("bpf_fentry_test4", addrs[3]);
GET_ADDR("bpf_fentry_test5", addrs[4]);
GET_ADDR("bpf_fentry_test6", addrs[5]);
GET_ADDR("bpf_fentry_test7", addrs[6]);
GET_ADDR("bpf_fentry_test3", addrs[1]);
GET_ADDR("bpf_fentry_test4", addrs[2]);
GET_ADDR("bpf_fentry_test5", addrs[3]);
GET_ADDR("bpf_fentry_test6", addrs[4]);
GET_ADDR("bpf_fentry_test7", addrs[5]);
GET_ADDR("bpf_fentry_test2", addrs[6]);
GET_ADDR("bpf_fentry_test8", addrs[7]);
#undef GET_ADDR
cookies[0] = 1;
cookies[1] = 2;
cookies[2] = 3;
cookies[3] = 4;
cookies[4] = 5;
cookies[5] = 6;
cookies[6] = 7;
cookies[7] = 8;
cookies[0] = 1; /* bpf_fentry_test1 */
cookies[1] = 2; /* bpf_fentry_test3 */
cookies[2] = 3; /* bpf_fentry_test4 */
cookies[3] = 4; /* bpf_fentry_test5 */
cookies[4] = 5; /* bpf_fentry_test6 */
cookies[5] = 6; /* bpf_fentry_test7 */
cookies[6] = 7; /* bpf_fentry_test2 */
cookies[7] = 8; /* bpf_fentry_test8 */
opts.kprobe_multi.addrs = (const unsigned long *) &addrs;
opts.kprobe_multi.cnt = ARRAY_SIZE(addrs);
@@ -149,14 +149,14 @@ static void kprobe_multi_link_api_subtest(void)
if (!ASSERT_GE(link1_fd, 0, "link1_fd"))
goto cleanup;
cookies[0] = 8;
cookies[1] = 7;
cookies[2] = 6;
cookies[3] = 5;
cookies[4] = 4;
cookies[5] = 3;
cookies[6] = 2;
cookies[7] = 1;
cookies[0] = 8; /* bpf_fentry_test1 */
cookies[1] = 7; /* bpf_fentry_test3 */
cookies[2] = 6; /* bpf_fentry_test4 */
cookies[3] = 5; /* bpf_fentry_test5 */
cookies[4] = 4; /* bpf_fentry_test6 */
cookies[5] = 3; /* bpf_fentry_test7 */
cookies[6] = 2; /* bpf_fentry_test2 */
cookies[7] = 1; /* bpf_fentry_test8 */
opts.kprobe_multi.flags = BPF_F_KPROBE_MULTI_RETURN;
prog_fd = bpf_program__fd(skel->progs.test_kretprobe);
@@ -181,12 +181,12 @@ static void kprobe_multi_attach_api_subtest(void)
struct kprobe_multi *skel = NULL;
const char *syms[8] = {
"bpf_fentry_test1",
"bpf_fentry_test2",
"bpf_fentry_test3",
"bpf_fentry_test4",
"bpf_fentry_test5",
"bpf_fentry_test6",
"bpf_fentry_test7",
"bpf_fentry_test2",
"bpf_fentry_test8",
};
__u64 cookies[8];
@@ -198,14 +198,14 @@ static void kprobe_multi_attach_api_subtest(void)
skel->bss->pid = getpid();
skel->bss->test_cookie = true;
cookies[0] = 1;
cookies[1] = 2;
cookies[2] = 3;
cookies[3] = 4;
cookies[4] = 5;
cookies[5] = 6;
cookies[6] = 7;
cookies[7] = 8;
cookies[0] = 1; /* bpf_fentry_test1 */
cookies[1] = 2; /* bpf_fentry_test3 */
cookies[2] = 3; /* bpf_fentry_test4 */
cookies[3] = 4; /* bpf_fentry_test5 */
cookies[4] = 5; /* bpf_fentry_test6 */
cookies[5] = 6; /* bpf_fentry_test7 */
cookies[6] = 7; /* bpf_fentry_test2 */
cookies[7] = 8; /* bpf_fentry_test8 */
opts.syms = syms;
opts.cnt = ARRAY_SIZE(syms);
@@ -216,14 +216,14 @@ static void kprobe_multi_attach_api_subtest(void)
if (!ASSERT_OK_PTR(link1, "bpf_program__attach_kprobe_multi_opts"))
goto cleanup;
cookies[0] = 8;
cookies[1] = 7;
cookies[2] = 6;
cookies[3] = 5;
cookies[4] = 4;
cookies[5] = 3;
cookies[6] = 2;
cookies[7] = 1;
cookies[0] = 8; /* bpf_fentry_test1 */
cookies[1] = 7; /* bpf_fentry_test3 */
cookies[2] = 6; /* bpf_fentry_test4 */
cookies[3] = 5; /* bpf_fentry_test5 */
cookies[4] = 4; /* bpf_fentry_test6 */
cookies[5] = 3; /* bpf_fentry_test7 */
cookies[6] = 2; /* bpf_fentry_test2 */
cookies[7] = 1; /* bpf_fentry_test8 */
opts.retprobe = true;


@@ -364,6 +364,9 @@ static int get_syms(char ***symsp, size_t *cntp)
continue;
if (!strncmp(name, "rcu_", 4))
continue;
if (!strncmp(name, "__ftrace_invalid_address__",
sizeof("__ftrace_invalid_address__") - 1))
continue;
err = hashmap__add(map, name, NULL);
if (err) {
free(name);


@@ -831,6 +831,59 @@ out:
bpf_object__close(obj);
}
#include "tailcall_bpf2bpf6.skel.h"
/* Tail call counting works even when there is data on stack which is
* not aligned to 8 bytes.
*/
static void test_tailcall_bpf2bpf_6(void)
{
struct tailcall_bpf2bpf6 *obj;
int err, map_fd, prog_fd, main_fd, data_fd, i, val;
LIBBPF_OPTS(bpf_test_run_opts, topts,
.data_in = &pkt_v4,
.data_size_in = sizeof(pkt_v4),
.repeat = 1,
);
obj = tailcall_bpf2bpf6__open_and_load();
if (!ASSERT_OK_PTR(obj, "open and load"))
return;
main_fd = bpf_program__fd(obj->progs.entry);
if (!ASSERT_GE(main_fd, 0, "entry prog fd"))
goto out;
map_fd = bpf_map__fd(obj->maps.jmp_table);
if (!ASSERT_GE(map_fd, 0, "jmp_table map fd"))
goto out;
prog_fd = bpf_program__fd(obj->progs.classifier_0);
if (!ASSERT_GE(prog_fd, 0, "classifier_0 prog fd"))
goto out;
i = 0;
err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
if (!ASSERT_OK(err, "jmp_table map update"))
goto out;
err = bpf_prog_test_run_opts(main_fd, &topts);
ASSERT_OK(err, "entry prog test run");
ASSERT_EQ(topts.retval, 0, "tailcall retval");
data_fd = bpf_map__fd(obj->maps.bss);
if (!ASSERT_GE(map_fd, 0, "bss map fd"))
goto out;
i = 0;
err = bpf_map_lookup_elem(data_fd, &i, &val);
ASSERT_OK(err, "bss map lookup");
ASSERT_EQ(val, 1, "done flag is set");
out:
tailcall_bpf2bpf6__destroy(obj);
}
void test_tailcalls(void)
{
if (test__start_subtest("tailcall_1"))
@@ -855,4 +908,6 @@ void test_tailcalls(void)
test_tailcall_bpf2bpf_4(false);
if (test__start_subtest("tailcall_bpf2bpf_5"))
test_tailcall_bpf2bpf_4(true);
if (test__start_subtest("tailcall_bpf2bpf_6"))
test_tailcall_bpf2bpf_6();
}


@@ -54,21 +54,21 @@ static void kprobe_multi_check(void *ctx, bool is_return)
if (is_return) {
SET(kretprobe_test1_result, &bpf_fentry_test1, 8);
SET(kretprobe_test2_result, &bpf_fentry_test2, 7);
SET(kretprobe_test3_result, &bpf_fentry_test3, 6);
SET(kretprobe_test4_result, &bpf_fentry_test4, 5);
SET(kretprobe_test5_result, &bpf_fentry_test5, 4);
SET(kretprobe_test6_result, &bpf_fentry_test6, 3);
SET(kretprobe_test7_result, &bpf_fentry_test7, 2);
SET(kretprobe_test2_result, &bpf_fentry_test2, 2);
SET(kretprobe_test3_result, &bpf_fentry_test3, 7);
SET(kretprobe_test4_result, &bpf_fentry_test4, 6);
SET(kretprobe_test5_result, &bpf_fentry_test5, 5);
SET(kretprobe_test6_result, &bpf_fentry_test6, 4);
SET(kretprobe_test7_result, &bpf_fentry_test7, 3);
SET(kretprobe_test8_result, &bpf_fentry_test8, 1);
} else {
SET(kprobe_test1_result, &bpf_fentry_test1, 1);
SET(kprobe_test2_result, &bpf_fentry_test2, 2);
SET(kprobe_test3_result, &bpf_fentry_test3, 3);
SET(kprobe_test4_result, &bpf_fentry_test4, 4);
SET(kprobe_test5_result, &bpf_fentry_test5, 5);
SET(kprobe_test6_result, &bpf_fentry_test6, 6);
SET(kprobe_test7_result, &bpf_fentry_test7, 7);
SET(kprobe_test2_result, &bpf_fentry_test2, 7);
SET(kprobe_test3_result, &bpf_fentry_test3, 2);
SET(kprobe_test4_result, &bpf_fentry_test4, 3);
SET(kprobe_test5_result, &bpf_fentry_test5, 4);
SET(kprobe_test6_result, &bpf_fentry_test6, 5);
SET(kprobe_test7_result, &bpf_fentry_test7, 6);
SET(kprobe_test8_result, &bpf_fentry_test8, 8);
}


@@ -0,0 +1,42 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#define __unused __attribute__((unused))
struct {
__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
__uint(max_entries, 1);
__uint(key_size, sizeof(__u32));
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
int done = 0;
SEC("tc")
int classifier_0(struct __sk_buff *skb __unused)
{
done = 1;
return 0;
}
static __noinline
int subprog_tail(struct __sk_buff *skb)
{
/* Don't propagate the constant to the caller */
volatile int ret = 1;
bpf_tail_call_static(skb, &jmp_table, 0);
return ret;
}
SEC("tc")
int entry(struct __sk_buff *skb)
{
/* Have data on the stack whose size is not a multiple of 8 */
volatile char arr[1] = {};
return subprog_tail(skb);
}
char __license[] SEC("license") = "GPL";