From 137df1189d128a6b5dee2f653e054b40ef36b94c Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Mon, 16 Oct 2023 11:28:40 -0700 Subject: [PATCH 01/51] libbpf: Don't assume SHT_GNU_verdef presence for SHT_GNU_versym section Fix the overly eager assumption that the SHT_GNU_verdef ELF section is going to be present whenever a binary has an SHT_GNU_versym section. It seems like either SHT_GNU_verdef or SHT_GNU_verneed can be used, so failing on a missing SHT_GNU_verdef actually breaks use cases in production. One specific reported issue, which was used to manually test this fix, was trying to attach to the `readline` function in the BASH binary. Fixes: bb7fa09399b9 ("libbpf: Support symbol versioning for uprobe") Reported-by: Liam Wisehart Signed-off-by: Andrii Nakryiko Signed-off-by: Daniel Borkmann Tested-by: Manu Bretelle Reviewed-by: Fangrui Song Acked-by: Hengqi Chen Link: https://lore.kernel.org/bpf/20231016182840.4033346-1-andrii@kernel.org --- tools/lib/bpf/elf.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) diff --git a/tools/lib/bpf/elf.c b/tools/lib/bpf/elf.c index 2a158e8a8b7c..2a62bf411bb3 100644 --- a/tools/lib/bpf/elf.c +++ b/tools/lib/bpf/elf.c @@ -141,14 +141,15 @@ static int elf_sym_iter_new(struct elf_sym_iter *iter, iter->versyms = elf_getdata(scn, 0); scn = elf_find_next_scn_by_type(elf, SHT_GNU_verdef, NULL); - if (!scn) { - pr_debug("elf: failed to find verdef ELF sections in '%s'\n", binary_path); - return -ENOENT; - } - if (!gelf_getshdr(scn, &sh)) - return -EINVAL; - iter->verdef_strtabidx = sh.sh_link; + if (!scn) + return 0; + iter->verdefs = elf_getdata(scn, 0); + if (!iter->verdefs || !gelf_getshdr(scn, &sh)) { + pr_warn("elf: failed to get verdef ELF section in '%s'\n", binary_path); + return -EINVAL; + } + iter->verdef_strtabidx = sh.sh_link; return 0; } @@ -199,6 +200,9 @@ static const char *elf_get_vername(struct elf_sym_iter *iter, int ver) GElf_Verdef verdef; int offset; + if (!iter->verdefs) + return NULL; + offset = 0; while (gelf_getverdef(iter->verdefs, offset, &verdef)) { if (verdef.vd_ndx != ver) { From 9a675ba55a96a45a9fb69e6a5c43f80c6682e541 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 16 Oct 2023 14:57:38 +0200 Subject: [PATCH 02/51] net, bpf: Add a warning if NAPI cb missed xdp_do_flush(). MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A few drivers were missing an xdp_do_flush() invocation after XDP_REDIRECT. Add three helper functions, one for each of the per-CPU lists; each returns true if its per-CPU list is non-empty and flushes the list. Add xdp_do_check_flushed(), which invokes each helper function and emits a warning if one of the helpers found a non-empty list. Hide everything behind CONFIG_DEBUG_NET.
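For driver authors, here is a minimal sketch of where the flush belongs in a NAPI poll callback (hypothetical driver; the mydrv_* names and ring structure are illustrative, not from this patch):

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_ring *ring = container_of(napi, struct mydrv_ring, napi);
	int work_done;

	/* May run an XDP program and, on XDP_REDIRECT, enqueue frames
	 * onto the per-CPU dev/cpumap/xskmap flush lists.
	 */
	work_done = mydrv_clean_rx_irq(ring, budget);

	/* Flush the per-CPU lists before the NAPI callback returns;
	 * forgetting this is exactly what xdp_do_check_flushed() now
	 * warns about under CONFIG_DEBUG_NET.
	 */
	xdp_do_flush();

	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}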
Suggested-by: Jesper Dangaard Brouer Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Daniel Borkmann Reviewed-by: Toke Høiland-Jørgensen Acked-by: Jakub Kicinski Acked-by: John Fastabend Link: https://lore.kernel.org/bpf/20231016125738.Yt79p1uF@linutronix.de --- include/linux/bpf.h | 3 +++ include/net/xdp_sock.h | 9 +++++++++ kernel/bpf/cpumap.c | 10 ++++++++++ kernel/bpf/devmap.c | 10 ++++++++++ net/core/dev.c | 2 ++ net/core/dev.h | 6 ++++++ net/core/filter.c | 16 ++++++++++++++++ net/xdp/xsk.c | 10 ++++++++++ 8 files changed, 66 insertions(+) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index d3c51a507508..b4b40b45962b 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2478,6 +2478,9 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, enum bpf_dynptr_type type, u32 offset, u32 size); void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr); + +bool dev_check_flush(void); +bool cpu_map_check_flush(void); #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 69b472604b86..7dd0df2f6f8e 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -109,4 +109,13 @@ static inline void __xsk_map_flush(void) #endif /* CONFIG_XDP_SOCKETS */ +#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET) +bool xsk_map_check_flush(void); +#else +static inline bool xsk_map_check_flush(void) +{ + return false; +} +#endif + #endif /* _LINUX_XDP_SOCK_H */ diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index e42a1bdb7f53..8a0bb80fe48a 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -764,6 +764,16 @@ void __cpu_map_flush(void) } } +#ifdef CONFIG_DEBUG_NET +bool cpu_map_check_flush(void) +{ + if (list_empty(this_cpu_ptr(&cpu_map_flush_list))) + return false; + __cpu_map_flush(); + return true; +} +#endif + static int __init cpu_map_init(void) { int cpu; diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 4d42f6ed6c11..a936c704d4e7 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -418,6 +418,16 @@ void __dev_flush(void) } } +#ifdef CONFIG_DEBUG_NET +bool dev_check_flush(void) +{ + if (list_empty(this_cpu_ptr(&dev_flush_list))) + return false; + __dev_flush(); + return true; +} +#endif + /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or * by local_bh_disable() (from XDP calls inside NAPI). The * rcu_read_lock_bh_held() below makes lockdep accept both. 
diff --git a/net/core/dev.c b/net/core/dev.c index 97e7b9833db9..4420831180c6 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -6535,6 +6535,8 @@ static int __napi_poll(struct napi_struct *n, bool *repoll) if (test_bit(NAPI_STATE_SCHED, &n->state)) { work = n->poll(n, weight); trace_napi_poll(n, work, weight); + + xdp_do_check_flushed(n); } if (unlikely(work > weight)) diff --git a/net/core/dev.h b/net/core/dev.h index e075e198092c..f66125857af7 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -136,4 +136,10 @@ static inline void netif_set_gro_ipv4_max_size(struct net_device *dev, } int rps_cpumask_housekeeping(struct cpumask *mask); + +#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL) +void xdp_do_check_flushed(struct napi_struct *napi); +#else +static inline void xdp_do_check_flushed(struct napi_struct *napi) { } +#endif #endif diff --git a/net/core/filter.c b/net/core/filter.c index cc2e4babc85f..21d75108c2e9 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -83,6 +83,8 @@ #include #include +#include "dev.h" + static const struct bpf_func_proto * bpf_sk_base_func_proto(enum bpf_func_id func_id); @@ -4208,6 +4210,20 @@ void xdp_do_flush(void) } EXPORT_SYMBOL_GPL(xdp_do_flush); +#if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL) +void xdp_do_check_flushed(struct napi_struct *napi) +{ + bool ret; + + ret = dev_check_flush(); + ret |= cpu_map_check_flush(); + ret |= xsk_map_check_flush(); + + WARN_ONCE(ret, "Missing xdp_do_flush() invocation after NAPI by %ps\n", + napi->poll); +} +#endif + void bpf_clear_redirect_map(struct bpf_map *map) { struct bpf_redirect_info *ri; diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index f5e96e0d6e01..ba070fd37d24 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -391,6 +391,16 @@ void __xsk_map_flush(void) } } +#ifdef CONFIG_DEBUG_NET +bool xsk_map_check_flush(void) +{ + if (list_empty(this_cpu_ptr(&xskmap_flush_list))) + return false; + __xsk_map_flush(); + return true; +} +#endif + void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries) { xskq_prod_submit_n(pool->cq, nb_entries); From 29a7e00ffadddd8d68eff311de1bf12ae10687bb Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Sat, 7 Oct 2023 13:59:44 +0000 Subject: [PATCH 03/51] bpf: Fix missed rcu read lock in bpf_task_under_cgroup() When employed within a sleepable program not under RCU protection, the use of 'bpf_task_under_cgroup()' may trigger a warning in the kernel log, particularly when CONFIG_PROVE_RCU is enabled: [ 1259.662357] WARNING: suspicious RCU usage [ 1259.662358] 6.5.0+ #33 Not tainted [ 1259.662360] ----------------------------- [ 1259.662361] include/linux/cgroup.h:423 suspicious rcu_dereference_check() usage! Other info that might help to debug this: [ 1259.662366] rcu_scheduler_active = 2, debug_locks = 1 [ 1259.662368] 1 lock held by trace/72954: [ 1259.662369] #0: ffffffffb5e3eda0 (rcu_read_lock_trace){....}-{0:0}, at: __bpf_prog_enter_sleepable+0x0/0xb0 Stack backtrace: [ 1259.662385] CPU: 50 PID: 72954 Comm: trace Kdump: loaded Not tainted 6.5.0+ #33 [ 1259.662391] Call Trace: [ 1259.662393] [ 1259.662395] dump_stack_lvl+0x6e/0x90 [ 1259.662401] dump_stack+0x10/0x20 [ 1259.662404] lockdep_rcu_suspicious+0x163/0x1b0 [ 1259.662412] task_css_set.part.0+0x23/0x30 [ 1259.662417] bpf_task_under_cgroup+0xe7/0xf0 [ 1259.662422] bpf_prog_7fffba481a3bcf88_lsm_run+0x5c/0x93 [ 1259.662431] bpf_trampoline_6442505574+0x60/0x1000 [ 1259.662439] bpf_lsm_bpf+0x5/0x20 [ 1259.662443] ? 
security_bpf+0x32/0x50 [ 1259.662452] __sys_bpf+0xe6/0xdd0 [ 1259.662463] __x64_sys_bpf+0x1a/0x30 [ 1259.662467] do_syscall_64+0x38/0x90 [ 1259.662472] entry_SYSCALL_64_after_hwframe+0x6e/0xd8 [ 1259.662479] RIP: 0033:0x7f487baf8e29 [...] [ 1259.662504] This issue can be reproduced by executing a straightforward program, as demonstrated below: SEC("lsm.s/bpf") int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size) { struct cgroup *cgrp = NULL; struct task_struct *task; int ret = 0; if (cmd != BPF_LINK_CREATE) return 0; // The cgroup2 should be mounted first cgrp = bpf_cgroup_from_id(1); if (!cgrp) goto out; task = bpf_get_current_task_btf(); if (bpf_task_under_cgroup(task, cgrp)) ret = -1; bpf_cgroup_release(cgrp); out: return ret; } After running the program, if you subsequently execute another BPF program, you will encounter the warning. It's worth noting that task_under_cgroup_hierarchy() is also utilized by bpf_current_task_under_cgroup(). However, bpf_current_task_under_cgroup() doesn't exhibit this issue because it cannot be used in sleepable BPF programs. Fixes: b5ad4cdc46c7 ("bpf: Add bpf_task_under_cgroup() kfunc") Signed-off-by: Yafang Shao Signed-off-by: Daniel Borkmann Acked-by: Stanislav Fomichev Cc: Feng Zhou Cc: KP Singh Link: https://lore.kernel.org/bpf/20231007135945.4306-1-laoar.shao@gmail.com --- kernel/bpf/helpers.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 62a53ebfedf9..61f51dee8448 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2215,7 +2215,12 @@ __bpf_kfunc struct cgroup *bpf_cgroup_from_id(u64 cgid) __bpf_kfunc long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) { - return task_under_cgroup_hierarchy(task, ancestor); + long ret; + + rcu_read_lock(); + ret = task_under_cgroup_hierarchy(task, ancestor); + rcu_read_unlock(); + return ret; } #endif /* CONFIG_CGROUPS */ From 44cb03f19b38c11cfc5bf76ea6d6885da210ded2 Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Sat, 7 Oct 2023 13:59:45 +0000 Subject: [PATCH 04/51] selftests/bpf: Add selftest for bpf_task_under_cgroup() in sleepable prog The result is as follows: $ tools/testing/selftests/bpf/test_progs --name=task_under_cgroup #237 task_under_cgroup:OK Summary: 1/0 PASSED, 0 SKIPPED, 0 FAILED Without the previous patch, there will be RCU warnings in dmesg when CONFIG_PROVE_RCU is enabled. With the previous patch applied, there are no warnings. Signed-off-by: Yafang Shao Signed-off-by: Daniel Borkmann Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/bpf/20231007135945.4306-2-laoar.shao@gmail.com --- .../bpf/prog_tests/task_under_cgroup.c | 11 ++++++-- .../bpf/progs/test_task_under_cgroup.c | 28 ++++++++++++++++++- 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c index 4224727fb364..626d76fe43a2 100644 --- a/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c +++ b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c @@ -30,8 +30,15 @@ void test_task_under_cgroup(void) if (!ASSERT_OK(ret, "test_task_under_cgroup__load")) goto cleanup; - ret = test_task_under_cgroup__attach(skel); - if (!ASSERT_OK(ret, "test_task_under_cgroup__attach")) + /* First, attach the LSM program, and then it will be triggered when the + * TP_BTF program is attached.
+ */ + skel->links.lsm_run = bpf_program__attach_lsm(skel->progs.lsm_run); + if (!ASSERT_OK_PTR(skel->links.lsm_run, "attach_lsm")) + goto cleanup; + + skel->links.tp_btf_run = bpf_program__attach_trace(skel->progs.tp_btf_run); + if (!ASSERT_OK_PTR(skel->links.tp_btf_run, "attach_tp_btf")) + goto cleanup; pid = fork(); diff --git a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c index 56cdc0a553f0..7e750309ce27 100644 --- a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c +++ b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c @@ -18,7 +18,7 @@ const volatile __u64 cgid; int remote_pid; SEC("tp_btf/task_newtask") -int BPF_PROG(handle__task_newtask, struct task_struct *task, u64 clone_flags) +int BPF_PROG(tp_btf_run, struct task_struct *task, u64 clone_flags) { struct cgroup *cgrp = NULL; struct task_struct *acquired; @@ -48,4 +48,30 @@ out: return 0; } +SEC("lsm.s/bpf") +int BPF_PROG(lsm_run, int cmd, union bpf_attr *attr, unsigned int size) +{ + struct cgroup *cgrp = NULL; + struct task_struct *task; + int ret = 0; + + task = bpf_get_current_task_btf(); + if (local_pid != task->pid) + return 0; + + if (cmd != BPF_LINK_CREATE) + return 0; + + /* 1 is the root cgroup */ + cgrp = bpf_cgroup_from_id(1); + if (!cgrp) + goto out; + if (!bpf_task_under_cgroup(task, cgrp)) + ret = -1; + bpf_cgroup_release(cgrp); + +out: + return ret; +} + char _license[] SEC("license") = "GPL"; From 24516309e330cd592c04d0467313d885584af4e8 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 17 Oct 2023 10:17:28 +0200 Subject: [PATCH 05/51] selftests/bpf: Add additional mprog query test coverage Add several new test cases which assert corner cases on the mprog query mechanism, for example, around passing in an array smaller or larger than the current count.
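For reference, here is a sketch of the two-pass query pattern the smaller-array case exercises on the userspace side (assuming libbpf's bpf_prog_query_opts() and the test's loopback ifindex; this is not code from the patch itself):

	__u32 ids[2];
	LIBBPF_OPTS(bpf_prog_query_opts, qopts,
		.prog_ids = ids,
		.prog_cnt = 2,
	);
	int err;

	/* With four programs attached but only two slots provided, the
	 * kernel copies what fits, updates qopts.prog_cnt to the real
	 * count, and the query fails with -ENOSPC.
	 */
	err = bpf_prog_query_opts(loopback, BPF_TCX_INGRESS, &qopts);
	if (err == -ENOSPC) {
		/* qopts.prog_cnt now holds the total attached count;
		 * retry with a sufficiently large array.
		 */
	}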
./test_progs -t tc_opts #252 tc_opts_after:OK #253 tc_opts_append:OK #254 tc_opts_basic:OK #255 tc_opts_before:OK #256 tc_opts_chain_classic:OK #257 tc_opts_chain_mixed:OK #258 tc_opts_delete_empty:OK #259 tc_opts_demixed:OK #260 tc_opts_detach:OK #261 tc_opts_detach_after:OK #262 tc_opts_detach_before:OK #263 tc_opts_dev_cleanup:OK #264 tc_opts_invalid:OK #265 tc_opts_max:OK #266 tc_opts_mixed:OK #267 tc_opts_prepend:OK #268 tc_opts_query:OK #269 tc_opts_query_attach:OK #270 tc_opts_replace:OK #271 tc_opts_revision:OK Summary: 20/0 PASSED, 0 SKIPPED, 0 FAILED Signed-off-by: Daniel Borkmann Signed-off-by: Andrii Nakryiko Reviewed-by: Alan Maguire Link: https://lore.kernel.org/bpf/20231017081728.24769-1-daniel@iogearbox.net --- .../selftests/bpf/prog_tests/tc_opts.c | 131 +++++++++++++++++- 1 file changed, 130 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/prog_tests/tc_opts.c b/tools/testing/selftests/bpf/prog_tests/tc_opts.c index ca506d2fcf58..51883ccb8020 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_opts.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_opts.c @@ -2471,7 +2471,7 @@ static void test_tc_opts_query_target(int target) __u32 fd1, fd2, fd3, fd4, id1, id2, id3, id4; struct test_tc_link *skel; union bpf_attr attr; - __u32 prog_ids[5]; + __u32 prog_ids[10]; int err; skel = test_tc_link__open_and_load(); @@ -2599,6 +2599,135 @@ static void test_tc_opts_query_target(int target) ASSERT_EQ(attr.query.link_ids, 0, "link_ids"); ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags"); + /* Test 3: Query with smaller prog_ids array */ + memset(&attr, 0, attr_size); + attr.query.target_ifindex = loopback; + attr.query.attach_type = target; + + memset(prog_ids, 0, sizeof(prog_ids)); + attr.query.prog_ids = ptr_to_u64(prog_ids); + attr.query.count = 2; + + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size); + ASSERT_EQ(err, -1, "prog_query_should_fail"); + ASSERT_EQ(errno, ENOSPC, "prog_query_should_fail"); + + ASSERT_EQ(attr.query.count, 4, "count"); + ASSERT_EQ(attr.query.revision, 5, "revision"); + ASSERT_EQ(attr.query.query_flags, 0, "query_flags"); + ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags"); + ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex"); + ASSERT_EQ(attr.query.attach_type, target, "attach_type"); + ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids"); + ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]"); + ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]"); + ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]"); + ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags"); + ASSERT_EQ(attr.query.link_ids, 0, "link_ids"); + ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags"); + + /* Test 4: Query with larger prog_ids array */ + memset(&attr, 0, attr_size); + attr.query.target_ifindex = loopback; + attr.query.attach_type = target; + + memset(prog_ids, 0, sizeof(prog_ids)); + attr.query.prog_ids = ptr_to_u64(prog_ids); + attr.query.count = 10; + + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(attr.query.count, 4, "count"); + ASSERT_EQ(attr.query.revision, 5, "revision"); + ASSERT_EQ(attr.query.query_flags, 0, "query_flags"); + ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags"); + ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex"); + ASSERT_EQ(attr.query.attach_type, target, "attach_type"); + ASSERT_EQ(attr.query.prog_ids, 
ptr_to_u64(prog_ids), "prog_ids"); + ASSERT_EQ(prog_ids[0], id1, "prog_ids[0]"); + ASSERT_EQ(prog_ids[1], id2, "prog_ids[1]"); + ASSERT_EQ(prog_ids[2], id3, "prog_ids[2]"); + ASSERT_EQ(prog_ids[3], id4, "prog_ids[3]"); + ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]"); + ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags"); + ASSERT_EQ(attr.query.link_ids, 0, "link_ids"); + ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags"); + + /* Test 5: Query with NULL prog_ids array but with count > 0 */ + memset(&attr, 0, attr_size); + attr.query.target_ifindex = loopback; + attr.query.attach_type = target; + + memset(prog_ids, 0, sizeof(prog_ids)); + attr.query.count = sizeof(prog_ids); + + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(attr.query.count, 4, "count"); + ASSERT_EQ(attr.query.revision, 5, "revision"); + ASSERT_EQ(attr.query.query_flags, 0, "query_flags"); + ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags"); + ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex"); + ASSERT_EQ(attr.query.attach_type, target, "attach_type"); + ASSERT_EQ(prog_ids[0], 0, "prog_ids[0]"); + ASSERT_EQ(prog_ids[1], 0, "prog_ids[1]"); + ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]"); + ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]"); + ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]"); + ASSERT_EQ(attr.query.prog_ids, 0, "prog_ids"); + ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags"); + ASSERT_EQ(attr.query.link_ids, 0, "link_ids"); + ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags"); + + /* Test 6: Query with non-NULL prog_ids array but with count == 0 */ + memset(&attr, 0, attr_size); + attr.query.target_ifindex = loopback; + attr.query.attach_type = target; + + memset(prog_ids, 0, sizeof(prog_ids)); + attr.query.prog_ids = ptr_to_u64(prog_ids); + + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup4; + + ASSERT_EQ(attr.query.count, 4, "count"); + ASSERT_EQ(attr.query.revision, 5, "revision"); + ASSERT_EQ(attr.query.query_flags, 0, "query_flags"); + ASSERT_EQ(attr.query.attach_flags, 0, "attach_flags"); + ASSERT_EQ(attr.query.target_ifindex, loopback, "target_ifindex"); + ASSERT_EQ(attr.query.attach_type, target, "attach_type"); + ASSERT_EQ(prog_ids[0], 0, "prog_ids[0]"); + ASSERT_EQ(prog_ids[1], 0, "prog_ids[1]"); + ASSERT_EQ(prog_ids[2], 0, "prog_ids[2]"); + ASSERT_EQ(prog_ids[3], 0, "prog_ids[3]"); + ASSERT_EQ(prog_ids[4], 0, "prog_ids[4]"); + ASSERT_EQ(attr.query.prog_ids, ptr_to_u64(prog_ids), "prog_ids"); + ASSERT_EQ(attr.query.prog_attach_flags, 0, "prog_attach_flags"); + ASSERT_EQ(attr.query.link_ids, 0, "link_ids"); + ASSERT_EQ(attr.query.link_attach_flags, 0, "link_attach_flags"); + + /* Test 7: Query with invalid flags */ + attr.query.attach_flags = 0; + attr.query.query_flags = 1; + + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size); + ASSERT_EQ(err, -1, "prog_query_should_fail"); + ASSERT_EQ(errno, EINVAL, "prog_query_should_fail"); + + attr.query.attach_flags = 1; + attr.query.query_flags = 0; + + err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, attr_size); + ASSERT_EQ(err, -1, "prog_query_should_fail"); + ASSERT_EQ(errno, EINVAL, "prog_query_should_fail"); + cleanup4: err = bpf_prog_detach_opts(fd4, loopback, target, &optd); ASSERT_OK(err, "prog_detach"); From bb6a88885fde24835afdaa1c5bb976a1bf5c5d71 Mon Sep 17 00:00:00 2001 From: Larysa Zaremba Date: Tue, 17 Oct 2023 18:27:57 +0200 Subject: [PATCH 06/51] 
selftests/bpf: Add options and frags to xdp_hw_metadata This is a follow-up to the commit 9b2b86332a9b ("bpf: Allow to use kfunc XDP hints and frags together"). There are some implementation problems that may arise when providing metadata specifically for multi-buffer packets, so there must be a way to test such an option separately. Add an option to use multi-buffer AF_XDP in xdp_hw_metadata and mark the XDP program used as capable of using frags. Until now, xdp_hw_metadata accepted no options, so add simple option parsing logic and a help message. For quick reference, also add an ingress packet generation command to the help message. The command comes from [0]. Example of output for a multi-buffer packet: xsk_ring_cons__peek: 1 0xead018: rx_desc[15]->addr=10000000000f000 addr=f100 comp_addr=f000 rx_hash: 0x5789FCBB with RSS type:0x29 rx_timestamp: 1696856851535324697 (sec:1696856851.5353) XDP RX-time: 1696856843158256391 (sec:1696856843.1583) delta sec:-8.3771 (-8377068.306 usec) AF_XDP time: 1696856843158413078 (sec:1696856843.1584) delta sec:0.0002 (156.687 usec) 0xead018: complete idx=23 addr=f000 xsk_ring_cons__peek: 1 0xead018: rx_desc[16]->addr=100000000008000 addr=8100 comp_addr=8000 0xead018: complete idx=24 addr=8000 xsk_ring_cons__peek: 1 0xead018: rx_desc[17]->addr=100000000009000 addr=9100 comp_addr=9000 EoP 0xead018: complete idx=25 addr=9000 Metadata is printed for the first packet only. [0] https://lore.kernel.org/all/20230119221536.3349901-18-sdf@google.com/ Signed-off-by: Larysa Zaremba Signed-off-by: Daniel Borkmann Acked-by: Stanislav Fomichev Link: https://lore.kernel.org/bpf/20231017162800.24080-1-larysa.zaremba@intel.com --- .../selftests/bpf/progs/xdp_hw_metadata.c | 2 +- tools/testing/selftests/bpf/xdp_hw_metadata.c | 78 ++++++++++++++++--- 2 files changed, 67 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c index b2dfd7066c6e..f6d1cc9ad892 100644 --- a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c @@ -21,7 +21,7 @@ extern int bpf_xdp_metadata_rx_timestamp(const struct xdp_md *ctx, extern int bpf_xdp_metadata_rx_hash(const struct xdp_md *ctx, __u32 *hash, enum xdp_rss_hash_type *rss_type) __ksym; -SEC("xdp") +SEC("xdp.frags") int rx(struct xdp_md *ctx) { void *data, *data_meta, *data_end; diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c index 17c980138796..17c0f92ff160 100644 --- a/tools/testing/selftests/bpf/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -47,6 +48,7 @@ struct xsk { }; struct xdp_hw_metadata *bpf_obj; +__u16 bind_flags = XDP_COPY; struct xsk *rx_xsk; const char *ifname; int ifindex; @@ -60,7 +62,7 @@ static int open_xsk(int ifindex, struct xsk *xsk, __u32 queue_id) const struct xsk_socket_config socket_config = { .rx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, .tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, - .bind_flags = XDP_COPY, + .bind_flags = bind_flags, }; const struct xsk_umem_config umem_config = { .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, @@ -263,11 +265,14 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t verify_skb_metadata(server_fd); for (i = 0; i < rxq; i++) { + bool first_seg = true; + bool is_eop = true; + if (fds[i].revents == 0) continue; struct xsk *xsk = &rx_xsk[i]; -
+peek: ret = xsk_ring_cons__peek(&xsk->rx, 1, &idx); printf("xsk_ring_cons__peek: %d\n", ret); if (ret != 1) @@ -276,12 +281,19 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t rx_desc = xsk_ring_cons__rx_desc(&xsk->rx, idx); comp_addr = xsk_umem__extract_addr(rx_desc->addr); addr = xsk_umem__add_offset_to_addr(rx_desc->addr); - printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx\n", - xsk, idx, rx_desc->addr, addr, comp_addr); - verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr), - clock_id); + is_eop = !(rx_desc->options & XDP_PKT_CONTD); + printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx%s\n", + xsk, idx, rx_desc->addr, addr, comp_addr, is_eop ? " EoP" : ""); + if (first_seg) { + verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr), + clock_id); + first_seg = false; + } + xsk_ring_cons__release(&xsk->rx, 1); refill_rx(xsk, comp_addr); + if (!is_eop) + goto peek; } } @@ -404,6 +416,53 @@ static void timestamping_enable(int fd, int val) error(1, errno, "setsockopt(SO_TIMESTAMPING)"); } +static void print_usage(void) +{ + const char *usage = + "Usage: xdp_hw_metadata [OPTIONS] [IFNAME]\n" + " -m Enable multi-buffer XDP for larger MTU\n" + " -h Display this help and exit\n\n" + "Generate test packets on the other machine with:\n" + " echo -n xdp | nc -u -q1 9091\n"; + + printf("%s", usage); +} + +static void read_args(int argc, char *argv[]) +{ + char opt; + + while ((opt = getopt(argc, argv, "mh")) != -1) { + switch (opt) { + case 'm': + bind_flags |= XDP_USE_SG; + break; + case 'h': + print_usage(); + exit(0); + case '?': + if (isprint(optopt)) + fprintf(stderr, "Unknown option: -%c\n", optopt); + fallthrough; + default: + print_usage(); + error(-1, opterr, "Command line options error"); + } + } + + if (optind >= argc) { + fprintf(stderr, "No device name provided\n"); + print_usage(); + exit(-1); + } + + ifname = argv[optind]; + ifindex = if_nametoindex(ifname); + + if (!ifname) + error(-1, errno, "Invalid interface name"); +} + int main(int argc, char *argv[]) { clockid_t clock_id = CLOCK_TAI; @@ -413,13 +472,8 @@ int main(int argc, char *argv[]) struct bpf_program *prog; - if (argc != 2) { - fprintf(stderr, "pass device name\n"); - return -1; - } + read_args(argc, argv); - ifname = argv[1]; - ifindex = if_nametoindex(ifname); rxq = rxq_num(ifname); printf("rxq: %d\n", rxq); From 0e133a13370389d3894891eafe54fec2c44ad735 Mon Sep 17 00:00:00 2001 From: Dave Thaler Date: Tue, 17 Oct 2023 20:30:20 +0000 Subject: [PATCH 07/51] bpf, docs: Define signed modulo as using truncated division There are different mathematical definitions (truncated, floored, rounded, etc.) and different languages have chosen different definitions [0][1]. E.g., languages/libraries that follow Knuth use a different mathematical definition than C uses. This patch specifies which definition BPF uses, as verified by Eduard [2] and others.
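As a concrete illustration of the difference (not part of the patch; C is shown since BPF adopts C's behavior):

#include <stdio.h>

int main(void)
{
	/* Truncated division, as this specification requires:
	 *   -13 % 3 == -13 - 3 * trunc(-13 / 3) == -13 - 3 * (-4) == -1
	 * Floored division (Python, Ruby) would instead yield 2.
	 */
	printf("%d\n", -13 % 3);	/* -1 */
	printf("%d\n", 13 % -3);	/* 1 */
	return 0;
}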
[0] https://en.wikipedia.org/wiki/Modulo#Variants_of_the_definition [1] https://torstencurdt.com/tech/posts/modulo-of-negative-numbers/ [2] https://lore.kernel.org/bpf/57e6fefadaf3b2995bb259fa8e711c7220ce5290.camel@gmail.com/ Signed-off-by: Dave Thaler Signed-off-by: Daniel Borkmann Acked-by: David Vernet Acked-by: Eduard Zingerman Link: https://lore.kernel.org/bpf/20231017203020.1500-1-dthaler1968@googlemail.com --- Documentation/bpf/standardization/instruction-set.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Documentation/bpf/standardization/instruction-set.rst b/Documentation/bpf/standardization/instruction-set.rst index c5d53a6e8c79..245b6defc298 100644 --- a/Documentation/bpf/standardization/instruction-set.rst +++ b/Documentation/bpf/standardization/instruction-set.rst @@ -283,6 +283,14 @@ For signed operations (``BPF_SDIV`` and ``BPF_SMOD``), for ``BPF_ALU``, is first :term:`sign extended` from 32 to 64 bits, and then interpreted as a 64-bit signed value. +Note that there are varying definitions of the signed modulo operation +when the dividend or divisor are negative, where implementations often +vary by language such that Python, Ruby, etc. differ from C, Go, Java, +etc. This specification requires that signed modulo use truncated division +(where -13 % 3 == -1) as implemented in C, Go, etc.: + + a % n = a - n * trunc(a / n) + The ``BPF_MOVSX`` instruction does a move operation with sign extension. ``BPF_ALU | BPF_MOVSX`` :term:`sign extends` 8-bit and 16-bit operands into 32 bit operands, and zeroes the remaining upper 32 bits. From 90704b4be0b0d6d0a7a9369d4b9aae6a579602c7 Mon Sep 17 00:00:00 2001 From: Manu Bretelle Date: Wed, 18 Oct 2023 16:01:32 -0700 Subject: [PATCH 08/51] bpftool: Fix printing of pointer value When printing a pointer value, "%p" will either print the hexadecimal value of the pointer (e.g `0x1234`), or `(nil)` when NULL. Both of those are invalid json "integer" values and need to be wrapped in quotes. Before: ``` $ sudo bpftool struct_ops dump name ned_dummy_cca | grep next "next": (nil), $ sudo bpftool struct_ops dump name ned_dummy_cca | \ jq '.[1].bpf_struct_ops_tcp_congestion_ops.data.list.next' parse error: Invalid numeric literal at line 29, column 34 ``` After: ``` $ sudo ./bpftool struct_ops dump name ned_dummy_cca | grep next "next": "(nil)", $ sudo ./bpftool struct_ops dump name ned_dummy_cca | \ jq '.[1].bpf_struct_ops_tcp_congestion_ops.data.list.next' "(nil)" ``` Signed-off-by: Manu Bretelle Signed-off-by: Daniel Borkmann Tested-by: Eduard Zingerman Acked-by: Eduard Zingerman Acked-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20231018230133.1593152-2-chantr4@gmail.com --- tools/bpf/bpftool/btf_dumper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/bpf/bpftool/btf_dumper.c b/tools/bpf/bpftool/btf_dumper.c index 1b7f69714604..527fe867a8fb 100644 --- a/tools/bpf/bpftool/btf_dumper.c +++ b/tools/bpf/bpftool/btf_dumper.c @@ -127,7 +127,7 @@ static void btf_dumper_ptr(const struct btf_dumper *d, print_ptr_value: if (d->is_plain_text) - jsonw_printf(d->jw, "%p", (void *)value); + jsonw_printf(d->jw, "\"%p\"", (void *)value); else jsonw_printf(d->jw, "%lu", value); } From 6bd5e167af2e9d1aa79e4a1a2598abcdc8fafd59 Mon Sep 17 00:00:00 2001 From: Manu Bretelle Date: Wed, 18 Oct 2023 16:01:33 -0700 Subject: [PATCH 09/51] bpftool: Wrap struct_ops dump in an array When dumping a struct_ops, 2 dictionaries are emitted. When using `name`, they were already wrapped in an array, but not when using `id`. 
This caused `jq` to fail at parsing the payload when it reached the comma following the first dict. This change wraps those dictionaries in an array so valid json is emitted. Before, jq fails to parse the output: ``` $ sudo bpftool struct_ops dump id 1523612 | jq . > /dev/null parse error: Expected value before ',' at line 19, column 2 ``` After, no error parsing the output: ``` sudo ./bpftool struct_ops dump id 1523612 | jq . > /dev/null ``` Signed-off-by: Manu Bretelle Signed-off-by: Daniel Borkmann Tested-by: Eduard Zingerman Acked-by: Eduard Zingerman Acked-by: Quentin Monnet Link: https://lore.kernel.org/bpf/20231018230133.1593152-3-chantr4@gmail.com --- tools/bpf/bpftool/struct_ops.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c index 3ebc9fe91e0e..d573f2640d8e 100644 --- a/tools/bpf/bpftool/struct_ops.c +++ b/tools/bpf/bpftool/struct_ops.c @@ -276,6 +276,9 @@ static struct res do_one_id(const char *id_str, work_func func, void *data, res.nr_maps++; + if (wtr) + jsonw_start_array(wtr); + if (func(fd, info, data, wtr)) res.nr_errs++; else if (!wtr && json_output) @@ -288,6 +291,9 @@ static struct res do_one_id(const char *id_str, work_func func, void *data, */ jsonw_null(json_wtr); + if (wtr) + jsonw_end_array(wtr); + done: free(info); close(fd); From 6da88306811b40a207c94c9da9faf07bdb20776e Mon Sep 17 00:00:00 2001 From: Chuyi Zhou Date: Wed, 18 Oct 2023 14:17:39 +0800 Subject: [PATCH 10/51] cgroup: Prepare for using css_task_iter_*() in BPF This patch makes some preparations for using css_task_iter_*() in BPF programs. 1. The CSS_TASK_ITER_* flags are #define-s and it's not easy for a bpf prog to use them. Convert them to an enum so a bpf prog can take them from vmlinux.h. 2. In the next patch we will expose css_task_iter_*() through common kfuncs, which is not safe as-is: css_task_iter_*() does spin_unlock_irq(), which might corrupt the irq flags depending on the context in which the bpf prog is running. So we should use irqsave/irqrestore here; the switch is harmless.
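With the flags converted to an enum, they land in BTF and therefore in a generated vmlinux.h. A sketch of what a BPF program can now rely on seeing there (assuming the usual `bpftool btf dump file /sys/kernel/btf/vmlinux format c` output shape):

enum {
	CSS_TASK_ITER_PROCS = 1,
	CSS_TASK_ITER_THREADED = 2,
	CSS_TASK_ITER_SKIPPED = 65536,
};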
Suggested-by: Alexei Starovoitov Signed-off-by: Chuyi Zhou Acked-by: Tejun Heo Link: https://lore.kernel.org/r/20231018061746.111364-2-zhouchuyi@bytedance.com Signed-off-by: Alexei Starovoitov --- include/linux/cgroup.h | 12 +++++------- kernel/cgroup/cgroup.c | 18 ++++++++++++------ 2 files changed, 17 insertions(+), 13 deletions(-) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b307013b9c6c..0ef0af66080e 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -40,13 +40,11 @@ struct kernel_clone_args; #define CGROUP_WEIGHT_DFL 100 #define CGROUP_WEIGHT_MAX 10000 -/* walk only threadgroup leaders */ -#define CSS_TASK_ITER_PROCS (1U << 0) -/* walk all threaded css_sets in the domain */ -#define CSS_TASK_ITER_THREADED (1U << 1) - -/* internal flags */ -#define CSS_TASK_ITER_SKIPPED (1U << 16) +enum { + CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */ + CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */ + CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */ +}; /* a css_task_iter should be treated as an opaque object */ struct css_task_iter { diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 1fb7f562289d..b6d64f3b8888 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -4917,9 +4917,11 @@ repeat: void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags, struct css_task_iter *it) { + unsigned long irqflags; + memset(it, 0, sizeof(*it)); - spin_lock_irq(&css_set_lock); + spin_lock_irqsave(&css_set_lock, irqflags); it->ss = css->ss; it->flags = flags; @@ -4933,7 +4935,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags, css_task_iter_advance(it); - spin_unlock_irq(&css_set_lock); + spin_unlock_irqrestore(&css_set_lock, irqflags); } /** @@ -4946,12 +4948,14 @@ void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags, */ struct task_struct *css_task_iter_next(struct css_task_iter *it) { + unsigned long irqflags; + if (it->cur_task) { put_task_struct(it->cur_task); it->cur_task = NULL; } - spin_lock_irq(&css_set_lock); + spin_lock_irqsave(&css_set_lock, irqflags); /* @it may be half-advanced by skips, finish advancing */ if (it->flags & CSS_TASK_ITER_SKIPPED) @@ -4964,7 +4968,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it) css_task_iter_advance(it); } - spin_unlock_irq(&css_set_lock); + spin_unlock_irqrestore(&css_set_lock, irqflags); return it->cur_task; } @@ -4977,11 +4981,13 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it) */ void css_task_iter_end(struct css_task_iter *it) { + unsigned long irqflags; + if (it->cur_cset) { - spin_lock_irq(&css_set_lock); + spin_lock_irqsave(&css_set_lock, irqflags); list_del(&it->iters_node); put_css_set_locked(it->cur_cset); - spin_unlock_irq(&css_set_lock); + spin_unlock_irqrestore(&css_set_lock, irqflags); } if (it->cur_dcset) From 9c66dc94b62aef23300f05f63404afb8990920b4 Mon Sep 17 00:00:00 2001 From: Chuyi Zhou Date: Wed, 18 Oct 2023 14:17:40 +0800 Subject: [PATCH 11/51] bpf: Introduce css_task open-coded iterator kfuncs This patch adds kfuncs bpf_iter_css_task_{new,next,destroy} which allow creation and manipulation of struct bpf_iter_css_task in open-coded iterator style. These kfuncs actually wrap css_task_iter_{start,next,end}. BPF programs can use these kfuncs through the bpf_for_each macro to iterate all tasks under a css.
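To illustrate the intended usage, here is a hedged sketch of a prog-side caller (it assumes the bpf_for_each() convenience macro from the selftests and the CSS_TASK_ITER_* enum exposed via vmlinux.h by the previous patch; the program itself is illustrative, modeled on the lsm.s/bpf repro earlier in this series):

SEC("lsm.s/bpf")
int BPF_PROG(count_css_tasks, int cmd, union bpf_attr *attr, unsigned int size)
{
	struct cgroup_subsys_state *css;
	struct task_struct *task;
	struct cgroup *cgrp;
	int n = 0;

	cgrp = bpf_cgroup_from_id(1);	/* root cgroup, as in the earlier selftest */
	if (!cgrp)
		return 0;
	css = &cgrp->self;

	/* Expands to the bpf_iter_css_task_{new,next,destroy}() kfuncs;
	 * CSS_TASK_ITER_PROCS walks only threadgroup leaders.
	 */
	bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS)
		n++;

	bpf_cgroup_release(cgrp);
	return 0;
}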
css_task_iter_*() would try to get the global spin-lock *css_set_lock*, so the bpf side has to be careful about where it allows this iter to be used. Currently we only allow it in bpf_lsm and bpf iter-s. Signed-off-by: Chuyi Zhou Acked-by: Tejun Heo Link: https://lore.kernel.org/r/20231018061746.111364-3-zhouchuyi@bytedance.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 3 + kernel/bpf/task_iter.c | 58 +++++++++++++++++++ kernel/bpf/verifier.c | 23 ++++++++ .../testing/selftests/bpf/bpf_experimental.h | 8 +++ 4 files changed, 92 insertions(+) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 61f51dee8448..c01441db9fd5 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2560,6 +2560,9 @@ BTF_ID_FLAGS(func, bpf_iter_num_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_iter_task_vma_new, KF_ITER_NEW | KF_RCU) BTF_ID_FLAGS(func, bpf_iter_task_vma_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY) +BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_dynptr_adjust) BTF_ID_FLAGS(func, bpf_dynptr_is_null) BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index fef17628341f..e4126698cecf 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -894,6 +894,64 @@ __bpf_kfunc void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __diag_pop(); +struct bpf_iter_css_task { + __u64 __opaque[1]; +} __attribute__((aligned(8))); + +struct bpf_iter_css_task_kern { + struct css_task_iter *css_it; +} __attribute__((aligned(8))); + +__diag_push(); +__diag_ignore_all("-Wmissing-prototypes", + "Global functions as their definitions will be in vmlinux BTF"); + +__bpf_kfunc int bpf_iter_css_task_new(struct bpf_iter_css_task *it, + struct cgroup_subsys_state *css, unsigned int flags) +{ + struct bpf_iter_css_task_kern *kit = (void *)it; + + BUILD_BUG_ON(sizeof(struct bpf_iter_css_task_kern) != sizeof(struct bpf_iter_css_task)); + BUILD_BUG_ON(__alignof__(struct bpf_iter_css_task_kern) != + __alignof__(struct bpf_iter_css_task)); + kit->css_it = NULL; + switch (flags) { + case CSS_TASK_ITER_PROCS | CSS_TASK_ITER_THREADED: + case CSS_TASK_ITER_PROCS: + case 0: + break; + default: + return -EINVAL; + } + + kit->css_it = bpf_mem_alloc(&bpf_global_ma, sizeof(struct css_task_iter)); + if (!kit->css_it) + return -ENOMEM; + css_task_iter_start(css, flags, kit->css_it); + return 0; +} + +__bpf_kfunc struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) +{ + struct bpf_iter_css_task_kern *kit = (void *)it; + + if (!kit->css_it) + return NULL; + return css_task_iter_next(kit->css_it); +} + +__bpf_kfunc void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) +{ + struct bpf_iter_css_task_kern *kit = (void *)it; + + if (!kit->css_it) + return; + css_task_iter_end(kit->css_it); + bpf_mem_free(&bpf_global_ma, kit->css_it); +} + +__diag_pop(); + DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work); static void do_mmap_read_unlock(struct irq_work *entry) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index bb58987e4844..974713185269 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -10472,6 +10472,7 @@ enum special_kfunc_type { KF_bpf_percpu_obj_new_impl, KF_bpf_percpu_obj_drop_impl, KF_bpf_throw, + KF_bpf_iter_css_task_new, };
BTF_SET_START(special_kfunc_set) @@ -10495,6 +10496,7 @@ BTF_ID(func, bpf_dynptr_clone) BTF_ID(func, bpf_percpu_obj_new_impl) BTF_ID(func, bpf_percpu_obj_drop_impl) BTF_ID(func, bpf_throw) +BTF_ID(func, bpf_iter_css_task_new) BTF_SET_END(special_kfunc_set) BTF_ID_LIST(special_kfunc_list) @@ -10520,6 +10522,7 @@ BTF_ID(func, bpf_dynptr_clone) BTF_ID(func, bpf_percpu_obj_new_impl) BTF_ID(func, bpf_percpu_obj_drop_impl) BTF_ID(func, bpf_throw) +BTF_ID(func, bpf_iter_css_task_new) static bool is_kfunc_ret_null(struct bpf_kfunc_call_arg_meta *meta) { @@ -11050,6 +11053,20 @@ static int process_kf_arg_ptr_to_rbtree_node(struct bpf_verifier_env *env, &meta->arg_rbtree_root.field); } +static bool check_css_task_iter_allowlist(struct bpf_verifier_env *env) +{ + enum bpf_prog_type prog_type = resolve_prog_type(env->prog); + + switch (prog_type) { + case BPF_PROG_TYPE_LSM: + return true; + case BPF_TRACE_ITER: + return env->prog->aux->sleepable; + default: + return false; + } +} + static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_arg_meta *meta, int insn_idx) { @@ -11300,6 +11317,12 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ break; } case KF_ARG_PTR_TO_ITER: + if (meta->func_id == special_kfunc_list[KF_bpf_iter_css_task_new]) { + if (!check_css_task_iter_allowlist(env)) { + verbose(env, "css_task_iter is only allowed in bpf_lsm and bpf iter-s\n"); + return -EINVAL; + } + } ret = process_iter_arg(env, regno, insn_idx, meta); if (ret < 0) return ret; diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index 2c8cb3f61529..6792ed2b45d7 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -458,4 +458,12 @@ extern void bpf_throw(u64 cookie) __ksym; __bpf_assert_op(LHS, <=, END, value, false); \ }) +struct bpf_iter_css_task; +struct cgroup_subsys_state; +extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it, + struct cgroup_subsys_state *css, unsigned int flags) __weak __ksym; +extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym; +extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym; + + #endif From c68a78ffe2cb4207f64fd0f4262818c728c67be0 Mon Sep 17 00:00:00 2001 From: Chuyi Zhou Date: Wed, 18 Oct 2023 14:17:41 +0800 Subject: [PATCH 12/51] bpf: Introduce task open coded iterator kfuncs This patch adds kfuncs bpf_iter_task_{new,next,destroy} which allow creation and manipulation of struct bpf_iter_task in open-coded iterator style. BPF programs can use these kfuncs directly or through the bpf_for_each macro to iterate all processes in the system. The API design is kept consistent with SEC("iter/task"). bpf_iter_task_new() accepts a specific task and an iterating type which allows: 1. iterating all processes in the system (BPF_TASK_ITER_ALL_PROCS) 2. iterating all threads in the system (BPF_TASK_ITER_ALL_THREADS) 3.
iterating all threads of a specific task (BPF_TASK_ITER_PROC_THREADS) Signed-off-by: Chuyi Zhou Link: https://lore.kernel.org/r/20231018061746.111364-4-zhouchuyi@bytedance.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/helpers.c | 3 + kernel/bpf/task_iter.c | 90 +++++++++++++++++++ .../testing/selftests/bpf/bpf_experimental.h | 5 ++ 3 files changed, 98 insertions(+) diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index c01441db9fd5..c25941531265 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2563,6 +2563,9 @@ BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) +BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_dynptr_adjust) BTF_ID_FLAGS(func, bpf_dynptr_is_null) BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index e4126698cecf..faa1712c1df5 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -952,6 +952,96 @@ __bpf_kfunc void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __diag_pop(); +struct bpf_iter_task { + __u64 __opaque[3]; +} __attribute__((aligned(8))); + +struct bpf_iter_task_kern { + struct task_struct *task; + struct task_struct *pos; + unsigned int flags; +} __attribute__((aligned(8))); + +enum { + /* all process in the system */ + BPF_TASK_ITER_ALL_PROCS, + /* all threads in the system */ + BPF_TASK_ITER_ALL_THREADS, + /* all threads of a specific process */ + BPF_TASK_ITER_PROC_THREADS +}; + +__diag_push(); +__diag_ignore_all("-Wmissing-prototypes", + "Global functions as their definitions will be in vmlinux BTF"); + +__bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it, + struct task_struct *task, unsigned int flags) +{ + struct bpf_iter_task_kern *kit = (void *)it; + + BUILD_BUG_ON(sizeof(struct bpf_iter_task_kern) > sizeof(struct bpf_iter_task)); + BUILD_BUG_ON(__alignof__(struct bpf_iter_task_kern) != + __alignof__(struct bpf_iter_task)); + + kit->task = kit->pos = NULL; + switch (flags) { + case BPF_TASK_ITER_ALL_THREADS: + case BPF_TASK_ITER_ALL_PROCS: + case BPF_TASK_ITER_PROC_THREADS: + break; + default: + return -EINVAL; + } + + if (flags == BPF_TASK_ITER_PROC_THREADS) + kit->task = task; + else + kit->task = &init_task; + kit->pos = kit->task; + kit->flags = flags; + return 0; +} + +__bpf_kfunc struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) +{ + struct bpf_iter_task_kern *kit = (void *)it; + struct task_struct *pos; + unsigned int flags; + + flags = kit->flags; + pos = kit->pos; + + if (!pos) + return pos; + + if (flags == BPF_TASK_ITER_ALL_PROCS) + goto get_next_task; + + kit->pos = next_thread(kit->pos); + if (kit->pos == kit->task) { + if (flags == BPF_TASK_ITER_PROC_THREADS) { + kit->pos = NULL; + return pos; + } + } else + return pos; + +get_next_task: + kit->pos = next_task(kit->pos); + kit->task = kit->pos; + if (kit->pos == &init_task) + kit->pos = NULL; + + return pos; +} + +__bpf_kfunc void bpf_iter_task_destroy(struct bpf_iter_task *it) +{ +} + +__diag_pop(); + DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work); static void do_mmap_read_unlock(struct irq_work *entry) diff --git a/tools/testing/selftests/bpf/bpf_experimental.h 
b/tools/testing/selftests/bpf/bpf_experimental.h index 6792ed2b45d7..2f6c747aa874 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -465,5 +465,10 @@ extern int bpf_iter_css_task_new(struct bpf_iter_css_task *it, extern struct task_struct *bpf_iter_css_task_next(struct bpf_iter_css_task *it) __weak __ksym; extern void bpf_iter_css_task_destroy(struct bpf_iter_css_task *it) __weak __ksym; +struct bpf_iter_task; +extern int bpf_iter_task_new(struct bpf_iter_task *it, + struct task_struct *task, unsigned int flags) __weak __ksym; +extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym; +extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym; #endif From 7251d0905e7518bcb990c8e9a3615b1bb23c78f2 Mon Sep 17 00:00:00 2001 From: Chuyi Zhou Date: Wed, 18 Oct 2023 14:17:42 +0800 Subject: [PATCH 13/51] bpf: Introduce css open-coded iterator kfuncs This patch adds kfuncs bpf_iter_css_{new,next,destroy} which allow creation and manipulation of struct bpf_iter_css in open-coded iterator style. These kfuncs actually wrap css_next_descendant_{pre,post}. css_iter can be used to: 1) iterate a specific cgroup tree in pre/post/up order 2) iterate cgroup_subsystems in a BPF prog, like for_each_mem_cgroup_tree/cpuset_for_each_descendant_pre in the kernel. The API design is consistent with cgroup_iter. bpf_iter_css_new accepts parameters defining iteration order and starting css. Here we also reuse the BPF_CGROUP_ITER_DESCENDANTS_PRE, BPF_CGROUP_ITER_DESCENDANTS_POST, BPF_CGROUP_ITER_ANCESTORS_UP enums. Signed-off-by: Chuyi Zhou Acked-by: Tejun Heo Link: https://lore.kernel.org/r/20231018061746.111364-5-zhouchuyi@bytedance.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/cgroup_iter.c | 65 +++++++++++++++++++ kernel/bpf/helpers.c | 3 + .../testing/selftests/bpf/bpf_experimental.h | 6 ++ 3 files changed, 74 insertions(+) diff --git a/kernel/bpf/cgroup_iter.c b/kernel/bpf/cgroup_iter.c index 810378f04fbc..209e5135f9fb 100644 --- a/kernel/bpf/cgroup_iter.c +++ b/kernel/bpf/cgroup_iter.c @@ -294,3 +294,68 @@ static int __init bpf_cgroup_iter_init(void) } late_initcall(bpf_cgroup_iter_init); + +struct bpf_iter_css { + __u64 __opaque[3]; +} __attribute__((aligned(8))); + +struct bpf_iter_css_kern { + struct cgroup_subsys_state *start; + struct cgroup_subsys_state *pos; + unsigned int flags; +} __attribute__((aligned(8))); + +__diag_push(); +__diag_ignore_all("-Wmissing-prototypes", + "Global functions as their definitions will be in vmlinux BTF"); + +__bpf_kfunc int bpf_iter_css_new(struct bpf_iter_css *it, + struct cgroup_subsys_state *start, unsigned int flags) +{ + struct bpf_iter_css_kern *kit = (void *)it; + + BUILD_BUG_ON(sizeof(struct bpf_iter_css_kern) > sizeof(struct bpf_iter_css)); + BUILD_BUG_ON(__alignof__(struct bpf_iter_css_kern) != __alignof__(struct bpf_iter_css)); + + kit->start = NULL; + switch (flags) { + case BPF_CGROUP_ITER_DESCENDANTS_PRE: + case BPF_CGROUP_ITER_DESCENDANTS_POST: + case BPF_CGROUP_ITER_ANCESTORS_UP: + break; + default: + return -EINVAL; + } + + kit->start = start; + kit->pos = NULL; + kit->flags = flags; + return 0; +} + +__bpf_kfunc struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) +{ + struct bpf_iter_css_kern *kit = (void *)it; + + if (!kit->start) + return NULL; + + switch (kit->flags) { + case BPF_CGROUP_ITER_DESCENDANTS_PRE: + kit->pos = css_next_descendant_pre(kit->pos, kit->start); + break; + case BPF_CGROUP_ITER_DESCENDANTS_POST: +
kit->pos = css_next_descendant_post(kit->pos, kit->start); + break; + case BPF_CGROUP_ITER_ANCESTORS_UP: + kit->pos = kit->pos ? kit->pos->parent : kit->start; + } + + return kit->pos; +} + +__bpf_kfunc void bpf_iter_css_destroy(struct bpf_iter_css *it) +{ +} + +__diag_pop(); \ No newline at end of file diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index c25941531265..b1d285ed4796 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2566,6 +2566,9 @@ BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) +BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_dynptr_adjust) BTF_ID_FLAGS(func, bpf_dynptr_is_null) BTF_ID_FLAGS(func, bpf_dynptr_is_rdonly) diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index 2f6c747aa874..1386baf9ae4a 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -471,4 +471,10 @@ extern int bpf_iter_task_new(struct bpf_iter_task *it, extern struct task_struct *bpf_iter_task_next(struct bpf_iter_task *it) __weak __ksym; extern void bpf_iter_task_destroy(struct bpf_iter_task *it) __weak __ksym; +struct bpf_iter_css; +extern int bpf_iter_css_new(struct bpf_iter_css *it, + struct cgroup_subsys_state *start, unsigned int flags) __weak __ksym; +extern struct cgroup_subsys_state *bpf_iter_css_next(struct bpf_iter_css *it) __weak __ksym; +extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym; + #endif From dfab99df147b0d364f0c199f832ff2aedfb2265a Mon Sep 17 00:00:00 2001 From: Chuyi Zhou Date: Wed, 18 Oct 2023 14:17:43 +0800 Subject: [PATCH 14/51] bpf: teach the verifier to enforce css_iter and task_iter in RCU CS css_iter and task_iter should be used within an RCU section. Specifically, in sleepable progs an explicit bpf_rcu_read_lock() is needed before using these iters. In normal bpf progs that have an implicit rcu_read_lock(), it's OK to use them directly. This patch adds a new KF flag KF_RCU_PROTECTED for bpf_iter_task_new and bpf_iter_css_new. It means the kfunc should be used within an RCU CS. We check whether we are in an rcu cs before invoking this kfunc. If the rcu protection is guaranteed, we would let st->type = PTR_TO_STACK | MEM_RCU. Once the user does rcu_unlock during the iteration, the MEM_RCU state of the regs is cleared. is_iter_reg_valid_init() will reject the reg if its type is UNTRUSTED. It is worth noting that currently, bpf_rcu_read_unlock does not clear the state of the STACK_ITER reg, since bpf_for_each_spilled_reg only considers STACK_SPILL. This patch also lets bpf_for_each_spilled_reg search STACK_ITER.
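To make the new rule concrete, here is a hedged sketch of a sleepable program that satisfies the check (illustrative only; bpf_rcu_read_{lock,unlock}() are the existing RCU kfuncs and bpf_for_each() is the selftests macro):

SEC("lsm.s/bpf")
int BPF_PROG(walk_procs, int cmd, union bpf_attr *attr, unsigned int size)
{
	struct task_struct *pos;
	struct task_struct *task = bpf_get_current_task_btf();

	/* Sleepable progs hold no implicit RCU read lock, so without this
	 * pair the iterator slot is marked PTR_UNTRUSTED and the verifier
	 * rejects bpf_iter_task_next() with
	 * "expected an RCU CS when using bpf_iter_task_new".
	 */
	bpf_rcu_read_lock();
	bpf_for_each(task, pos, task, BPF_TASK_ITER_ALL_PROCS)
		/* inspect pos */;
	bpf_rcu_read_unlock();
	return 0;
}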
Signed-off-by: Chuyi Zhou Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20231018061746.111364-6-zhouchuyi@bytedance.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 19 ++++++++------ include/linux/btf.h | 1 + kernel/bpf/helpers.c | 4 +-- kernel/bpf/verifier.c | 50 ++++++++++++++++++++++++++++-------- 4 files changed, 53 insertions(+), 21 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 94ec766432f5..e67cd45a85be 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -386,19 +386,18 @@ struct bpf_verifier_state { u32 jmp_history_cnt; }; -#define bpf_get_spilled_reg(slot, frame) \ +#define bpf_get_spilled_reg(slot, frame, mask) \ (((slot < frame->allocated_stack / BPF_REG_SIZE) && \ - (frame->stack[slot].slot_type[0] == STACK_SPILL)) \ + ((1 << frame->stack[slot].slot_type[0]) & (mask))) \ ? &frame->stack[slot].spilled_ptr : NULL) /* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */ -#define bpf_for_each_spilled_reg(iter, frame, reg) \ - for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \ +#define bpf_for_each_spilled_reg(iter, frame, reg, mask) \ + for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask); \ iter < frame->allocated_stack / BPF_REG_SIZE; \ - iter++, reg = bpf_get_spilled_reg(iter, frame)) + iter++, reg = bpf_get_spilled_reg(iter, frame, mask)) -/* Invoke __expr over regsiters in __vst, setting __state and __reg */ -#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \ +#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \ ({ \ struct bpf_verifier_state *___vstate = __vst; \ int ___i, ___j; \ @@ -410,7 +409,7 @@ struct bpf_verifier_state { __reg = &___regs[___j]; \ (void)(__expr); \ } \ - bpf_for_each_spilled_reg(___j, __state, __reg) { \ + bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \ if (!__reg) \ continue; \ (void)(__expr); \ @@ -418,6 +417,10 @@ struct bpf_verifier_state { } \ }) +/* Invoke __expr over regsiters in __vst, setting __state and __reg */ +#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \ + bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr) + /* linked list of verifier states used to prune search */ struct bpf_verifier_state_list { struct bpf_verifier_state state; diff --git a/include/linux/btf.h b/include/linux/btf.h index 928113a80a95..c2231c64d60b 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -74,6 +74,7 @@ #define KF_ITER_NEW (1 << 8) /* kfunc implements BPF iter constructor */ #define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */ #define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */ +#define KF_RCU_PROTECTED (1 << 11) /* kfunc should be protected by rcu cs when they are invoked */ /* * Tag marking a kernel function as a kfunc. 
This is meant to minimize the diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index b1d285ed4796..da058aead20c 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2563,10 +2563,10 @@ BTF_ID_FLAGS(func, bpf_iter_task_vma_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_iter_css_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) BTF_ID_FLAGS(func, bpf_iter_css_task_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_css_task_destroy, KF_ITER_DESTROY) -BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_iter_task_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) BTF_ID_FLAGS(func, bpf_iter_task_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_task_destroy, KF_ITER_DESTROY) -BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS) +BTF_ID_FLAGS(func, bpf_iter_css_new, KF_ITER_NEW | KF_TRUSTED_ARGS | KF_RCU_PROTECTED) BTF_ID_FLAGS(func, bpf_iter_css_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_css_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_dynptr_adjust) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 974713185269..fcdf2382153a 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1173,7 +1173,12 @@ static bool is_dynptr_type_expected(struct bpf_verifier_env *env, struct bpf_reg static void __mark_reg_known_zero(struct bpf_reg_state *reg); +static bool in_rcu_cs(struct bpf_verifier_env *env); + +static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta); + static int mark_stack_slots_iter(struct bpf_verifier_env *env, + struct bpf_kfunc_call_arg_meta *meta, struct bpf_reg_state *reg, int insn_idx, struct btf *btf, u32 btf_id, int nr_slots) { @@ -1194,6 +1199,12 @@ static int mark_stack_slots_iter(struct bpf_verifier_env *env, __mark_reg_known_zero(st); st->type = PTR_TO_STACK; /* we don't have dedicated reg type */ + if (is_kfunc_rcu_protected(meta)) { + if (in_rcu_cs(env)) + st->type |= MEM_RCU; + else + st->type |= PTR_UNTRUSTED; + } st->live |= REG_LIVE_WRITTEN; st->ref_obj_id = i == 0 ? 
id : 0; st->iter.btf = btf; @@ -1268,7 +1279,7 @@ static bool is_iter_reg_valid_uninit(struct bpf_verifier_env *env, return true; } -static bool is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg, +static int is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_state *reg, struct btf *btf, u32 btf_id, int nr_slots) { struct bpf_func_state *state = func(env, reg); @@ -1276,26 +1287,28 @@ static bool is_iter_reg_valid_init(struct bpf_verifier_env *env, struct bpf_reg_ spi = iter_get_spi(env, reg, nr_slots); if (spi < 0) - return false; + return -EINVAL; for (i = 0; i < nr_slots; i++) { struct bpf_stack_state *slot = &state->stack[spi - i]; struct bpf_reg_state *st = &slot->spilled_ptr; + if (st->type & PTR_UNTRUSTED) + return -EPROTO; /* only main (first) slot has ref_obj_id set */ if (i == 0 && !st->ref_obj_id) - return false; + return -EINVAL; if (i != 0 && st->ref_obj_id) - return false; + return -EINVAL; if (st->iter.btf != btf || st->iter.btf_id != btf_id) - return false; + return -EINVAL; for (j = 0; j < BPF_REG_SIZE; j++) if (slot->slot_type[j] != STACK_ITER) - return false; + return -EINVAL; } - return true; + return 0; } /* Check if given stack slot is "special": @@ -7640,15 +7653,24 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id return err; } - err = mark_stack_slots_iter(env, reg, insn_idx, meta->btf, btf_id, nr_slots); + err = mark_stack_slots_iter(env, meta, reg, insn_idx, meta->btf, btf_id, nr_slots); if (err) return err; } else { /* iter_next() or iter_destroy() expect initialized iter state*/ - if (!is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots)) { + err = is_iter_reg_valid_init(env, reg, meta->btf, btf_id, nr_slots); + switch (err) { + case 0: + break; + case -EINVAL: verbose(env, "expected an initialized iter_%s as arg #%d\n", iter_type_str(meta->btf, btf_id), regno); - return -EINVAL; + return err; + case -EPROTO: + verbose(env, "expected an RCU CS when using %s\n", meta->func_name); + return err; + default: + return err; } spi = iter_get_spi(env, reg, nr_slots); @@ -10231,6 +10253,11 @@ static bool is_kfunc_rcu(struct bpf_kfunc_call_arg_meta *meta) return meta->kfunc_flags & KF_RCU; } +static bool is_kfunc_rcu_protected(struct bpf_kfunc_call_arg_meta *meta) +{ + return meta->kfunc_flags & KF_RCU_PROTECTED; +} + static bool __kfunc_param_match_suffix(const struct btf *btf, const struct btf_param *arg, const char *suffix) @@ -11582,6 +11609,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, if (env->cur_state->active_rcu_lock) { struct bpf_func_state *state; struct bpf_reg_state *reg; + u32 clear_mask = (1 << STACK_SPILL) | (1 << STACK_ITER); if (in_rbtree_lock_required_cb(env) && (rcu_lock || rcu_unlock)) { verbose(env, "Calling bpf_rcu_read_{lock,unlock} in unnecessary rbtree callback\n"); @@ -11592,7 +11620,7 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, verbose(env, "nested rcu read lock (kernel function %s)\n", func_name); return -EINVAL; } else if (rcu_unlock) { - bpf_for_each_reg_in_vstate(env->cur_state, state, reg, ({ + bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg, clear_mask, ({ if (reg->type & MEM_RCU) { reg->type &= ~(MEM_RCU | PTR_MAYBE_NULL); reg->type |= PTR_UNTRUSTED; From cb3ecf7915a1d7ce5304402f4d8616d9fa5193f7 Mon Sep 17 00:00:00 2001 From: Chuyi Zhou Date: Wed, 18 Oct 2023 14:17:44 +0800 Subject: [PATCH 15/51] bpf: Let bpf_iter_task_new accept null task ptr When using 
task_iter to iterate all threads of a specific task, we enforce that the user must pass a valid task pointer to ensure safety. However, when iterating all threads/processes in the system, the BPF verifier still requires a valid ptr instead of a "nullable" pointer, even though it's pointless, which is surprising from a usability standpoint. It would be nice if we could let that kfunc accept an explicit null pointer when we are using BPF_TASK_ITER_ALL_{PROCS, THREADS} and a valid pointer when using BPF_TASK_ITER_PROC_THREADS. Given a trivial kfunc: __bpf_kfunc void FN(struct TYPE_A *obj); a BPF prog would reject a nullptr for obj. The error info is: "arg#x pointer type xx xx must point to scalar, or struct with scalar" reported by get_kfunc_ptr_arg_type(). The reg->type is SCALAR_VALUE and the btf type of ref_t is not scalar or scalar_struct, which leads to the rejection by get_kfunc_ptr_arg_type(). This patch adds a "__nullable" annotation: __bpf_kfunc void FN(struct TYPE_A *obj__nullable); Here __nullable indicates obj is optional: the user can pass an explicit nullptr or a normal TYPE_A pointer. In get_kfunc_ptr_arg_type(), we detect whether the current arg is optional and the register is null; if so, we return a new kfunc_ptr_arg_type KF_ARG_PTR_TO_NULL and skip to the next arg in check_kfunc_args(). Signed-off-by: Chuyi Zhou Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20231018061746.111364-7-zhouchuyi@bytedance.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/task_iter.c | 7 +++++-- kernel/bpf/verifier.c | 13 ++++++++++++- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index faa1712c1df5..59e747938bdb 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -976,7 +976,7 @@ __diag_ignore_all("-Wmissing-prototypes", "Global functions as their definitions will be in vmlinux BTF"); __bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it, - struct task_struct *task, unsigned int flags) + struct task_struct *task__nullable, unsigned int flags) { struct bpf_iter_task_kern *kit = (void *)it; @@ -988,14 +988,17 @@ __bpf_kfunc int bpf_iter_task_new(struct bpf_iter_task *it, switch (flags) { case BPF_TASK_ITER_ALL_THREADS: case BPF_TASK_ITER_ALL_PROCS: + break; case BPF_TASK_ITER_PROC_THREADS: + if (!task__nullable) + return -EINVAL; break; default: return -EINVAL; } if (flags == BPF_TASK_ITER_PROC_THREADS) - kit->task = task; + kit->task = task__nullable; else kit->task = &init_task; kit->pos = kit->task; diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index fcdf2382153a..e9bc5d4a25a1 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -10332,6 +10332,11 @@ static bool is_kfunc_arg_refcounted_kptr(const struct btf *btf, const struct btf return __kfunc_param_match_suffix(btf, arg, "__refcounted_kptr"); } +static bool is_kfunc_arg_nullable(const struct btf *btf, const struct btf_param *arg) +{ + return __kfunc_param_match_suffix(btf, arg, "__nullable"); +} + static bool is_kfunc_arg_scalar_with_name(const struct btf *btf, const struct btf_param *arg, const char *name) @@ -10474,6 +10479,7 @@ enum kfunc_ptr_arg_type { KF_ARG_PTR_TO_CALLBACK, KF_ARG_PTR_TO_RB_ROOT, KF_ARG_PTR_TO_RB_NODE, + KF_ARG_PTR_TO_NULL, }; enum special_kfunc_type { @@ -10630,6 +10636,8 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, if (is_kfunc_arg_callback(env, meta->btf, &args[argno])) return KF_ARG_PTR_TO_CALLBACK; + if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg)) + return
KF_ARG_PTR_TO_NULL; if (argno + 1 < nargs && (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], ®s[regno + 1]) || @@ -11180,7 +11188,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ } if ((is_kfunc_trusted_args(meta) || is_kfunc_rcu(meta)) && - (register_is_null(reg) || type_may_be_null(reg->type))) { + (register_is_null(reg) || type_may_be_null(reg->type)) && + !is_kfunc_arg_nullable(meta->btf, &args[i])) { verbose(env, "Possibly NULL pointer passed to trusted arg%d\n", i); return -EACCES; } @@ -11205,6 +11214,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ return kf_arg_type; switch (kf_arg_type) { + case KF_ARG_PTR_TO_NULL: + continue; case KF_ARG_PTR_TO_ALLOC_BTF_ID: case KF_ARG_PTR_TO_BTF_ID: if (!is_kfunc_trusted_args(meta) && !is_kfunc_rcu(meta)) From ddab78cbb52f81f7f7598482602342955b2ff8b8 Mon Sep 17 00:00:00 2001 From: Chuyi Zhou Date: Wed, 18 Oct 2023 14:17:45 +0800 Subject: [PATCH 16/51] selftests/bpf: rename bpf_iter_task.c to bpf_iter_tasks.c The newly-added struct bpf_iter_task has a name collision with a selftest for the seq_file task iter's bpf skel, so the selftests/bpf/progs file is renamed in order to avoid the collision. Signed-off-by: Chuyi Zhou Acked-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20231018061746.111364-8-zhouchuyi@bytedance.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/bpf_iter.c | 18 +++++++++--------- .../{bpf_iter_task.c => bpf_iter_tasks.c} | 0 2 files changed, 9 insertions(+), 9 deletions(-) rename tools/testing/selftests/bpf/progs/{bpf_iter_task.c => bpf_iter_tasks.c} (100%) diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c index 41aba139b20b..e3498f607b49 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c @@ -7,7 +7,7 @@ #include "bpf_iter_ipv6_route.skel.h" #include "bpf_iter_netlink.skel.h" #include "bpf_iter_bpf_map.skel.h" -#include "bpf_iter_task.skel.h" +#include "bpf_iter_tasks.skel.h" #include "bpf_iter_task_stack.skel.h" #include "bpf_iter_task_file.skel.h" #include "bpf_iter_task_vmas.skel.h" @@ -215,12 +215,12 @@ static void *do_nothing_wait(void *arg) static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts, int *num_unknown, int *num_known) { - struct bpf_iter_task *skel; + struct bpf_iter_tasks *skel; pthread_t thread_id; void *ret; - skel = bpf_iter_task__open_and_load(); - if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load")) + skel = bpf_iter_tasks__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load")) return; ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock"); @@ -239,7 +239,7 @@ static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts, ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL, "pthread_join"); - bpf_iter_task__destroy(skel); + bpf_iter_tasks__destroy(skel); } static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known) @@ -307,10 +307,10 @@ static void test_task_pidfd(void) static void test_task_sleepable(void) { - struct bpf_iter_task *skel; + struct bpf_iter_tasks *skel; - skel = bpf_iter_task__open_and_load(); - if (!ASSERT_OK_PTR(skel, "bpf_iter_task__open_and_load")) + skel = bpf_iter_tasks__open_and_load(); + if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load")) return; do_dummy_read(skel->progs.dump_task_sleepable); @@ -320,7 +320,7 @@ static void 
test_task_sleepable(void) ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0, "num_success_copy_from_user_task"); - bpf_iter_task__destroy(skel); + bpf_iter_tasks__destroy(skel); } static void test_task_stack(void) diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_task.c b/tools/testing/selftests/bpf/progs/bpf_iter_tasks.c similarity index 100% rename from tools/testing/selftests/bpf/progs/bpf_iter_task.c rename to tools/testing/selftests/bpf/progs/bpf_iter_tasks.c From 130e0f7af9fc7388b90fb016ca13ff3840c48d4a Mon Sep 17 00:00:00 2001 From: Chuyi Zhou Date: Wed, 18 Oct 2023 14:17:46 +0800 Subject: [PATCH 17/51] selftests/bpf: Add tests for open-coded task and css iter This patch adds 4 subtests to demonstrate these patterns and validate correctness. subtest1: 1) We use task_iter to iterate all processes in the system and search for the current process with a given pid. 2) We create some threads in the current process context, and use BPF_TASK_ITER_PROC_THREADS to iterate all threads of the current process. As expected, we would find all the threads of the current process. 3) We create some threads and use BPF_TASK_ITER_ALL_THREADS to iterate all threads in the system. As expected, we would find all the threads that were created. subtest2: We create a cgroup and add the current task to the cgroup. In the BPF program, we would use bpf_for_each(css_task, task, css) to iterate all tasks under the cgroup. As expected, we would find the current process. subtest3: 1) We create a cgroup tree. In the BPF program, we use bpf_for_each(css, pos, root, XXX) to iterate all descendants under the root in pre- and post-order. As expected, we would find all descendants, the last cgroup iterated in post-order is the root cgroup, and the first cgroup iterated in pre-order is the root cgroup. 2) We use BPF_CGROUP_ITER_ANCESTORS_UP to traverse the cgroup tree starting from the leaf and the root separately, and record the heights. The difference between the two heights would be the total tree height - 1. subtest4: Add some failure testcases for using the css_task, task and css iters, e.g., unlocking while using task iters to iterate tasks. Signed-off-by: Chuyi Zhou Link: https://lore.kernel.org/r/20231018061746.111364-9-zhouchuyi@bytedance.com Signed-off-by: Alexei Starovoitov --- .../testing/selftests/bpf/prog_tests/iters.c | 150 ++++++++++++++++++ tools/testing/selftests/bpf/progs/iters_css.c | 72 +++++++++ .../selftests/bpf/progs/iters_css_task.c | 47 ++++++ .../testing/selftests/bpf/progs/iters_task.c | 41 +++++ .../selftests/bpf/progs/iters_task_failure.c | 105 ++++++++++++ 5 files changed, 415 insertions(+) create mode 100644 tools/testing/selftests/bpf/progs/iters_css.c create mode 100644 tools/testing/selftests/bpf/progs/iters_css_task.c create mode 100644 tools/testing/selftests/bpf/progs/iters_task.c create mode 100644 tools/testing/selftests/bpf/progs/iters_task_failure.c diff --git a/tools/testing/selftests/bpf/prog_tests/iters.c b/tools/testing/selftests/bpf/prog_tests/iters.c index b696873c5455..c2425791c923 100644 --- a/tools/testing/selftests/bpf/prog_tests/iters.c +++ b/tools/testing/selftests/bpf/prog_tests/iters.c @@ -1,7 +1,14 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
*/ +#include +#include +#include +#include +#include +#include #include +#include "cgroup_helpers.h" #include "iters.skel.h" #include "iters_state_safety.skel.h" @@ -9,6 +16,10 @@ #include "iters_num.skel.h" #include "iters_testmod_seq.skel.h" #include "iters_task_vma.skel.h" +#include "iters_task.skel.h" +#include "iters_css_task.skel.h" +#include "iters_css.skel.h" +#include "iters_task_failure.skel.h" static void subtest_num_iters(void) { @@ -146,6 +157,138 @@ cleanup: iters_task_vma__destroy(skel); } +static pthread_mutex_t do_nothing_mutex; + +static void *do_nothing_wait(void *arg) +{ + pthread_mutex_lock(&do_nothing_mutex); + pthread_mutex_unlock(&do_nothing_mutex); + + pthread_exit(arg); +} + +#define thread_num 2 + +static void subtest_task_iters(void) +{ + struct iters_task *skel = NULL; + pthread_t thread_ids[thread_num]; + void *ret; + int err; + + skel = iters_task__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + goto cleanup; + skel->bss->target_pid = getpid(); + err = iters_task__attach(skel); + if (!ASSERT_OK(err, "iters_task__attach")) + goto cleanup; + pthread_mutex_lock(&do_nothing_mutex); + for (int i = 0; i < thread_num; i++) + ASSERT_OK(pthread_create(&thread_ids[i], NULL, &do_nothing_wait, NULL), + "pthread_create"); + + syscall(SYS_getpgid); + iters_task__detach(skel); + ASSERT_EQ(skel->bss->procs_cnt, 1, "procs_cnt"); + ASSERT_EQ(skel->bss->threads_cnt, thread_num + 1, "threads_cnt"); + ASSERT_EQ(skel->bss->proc_threads_cnt, thread_num + 1, "proc_threads_cnt"); + pthread_mutex_unlock(&do_nothing_mutex); + for (int i = 0; i < thread_num; i++) + ASSERT_OK(pthread_join(thread_ids[i], &ret), "pthread_join"); +cleanup: + iters_task__destroy(skel); +} + +extern int stack_mprotect(void); + +static void subtest_css_task_iters(void) +{ + struct iters_css_task *skel = NULL; + int err, cg_fd, cg_id; + const char *cgrp_path = "/cg1"; + + err = setup_cgroup_environment(); + if (!ASSERT_OK(err, "setup_cgroup_environment")) + goto cleanup; + cg_fd = create_and_get_cgroup(cgrp_path); + if (!ASSERT_GE(cg_fd, 0, "create_and_get_cgroup")) + goto cleanup; + cg_id = get_cgroup_id(cgrp_path); + err = join_cgroup(cgrp_path); + if (!ASSERT_OK(err, "join_cgroup")) + goto cleanup; + + skel = iters_css_task__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + goto cleanup; + + skel->bss->target_pid = getpid(); + skel->bss->cg_id = cg_id; + err = iters_css_task__attach(skel); + if (!ASSERT_OK(err, "iters_task__attach")) + goto cleanup; + err = stack_mprotect(); + if (!ASSERT_EQ(err, -1, "stack_mprotect") || + !ASSERT_EQ(errno, EPERM, "stack_mprotect")) + goto cleanup; + iters_css_task__detach(skel); + ASSERT_EQ(skel->bss->css_task_cnt, 1, "css_task_cnt"); + +cleanup: + cleanup_cgroup_environment(); + iters_css_task__destroy(skel); +} + +static void subtest_css_iters(void) +{ + struct iters_css *skel = NULL; + struct { + const char *path; + int fd; + } cgs[] = { + { "/cg1" }, + { "/cg1/cg2" }, + { "/cg1/cg2/cg3" }, + { "/cg1/cg2/cg3/cg4" }, + }; + int err, cg_nr = ARRAY_SIZE(cgs); + int i; + + err = setup_cgroup_environment(); + if (!ASSERT_OK(err, "setup_cgroup_environment")) + goto cleanup; + for (i = 0; i < cg_nr; i++) { + cgs[i].fd = create_and_get_cgroup(cgs[i].path); + if (!ASSERT_GE(cgs[i].fd, 0, "create_and_get_cgroup")) + goto cleanup; + } + + skel = iters_css__open_and_load(); + if (!ASSERT_OK_PTR(skel, "open_and_load")) + goto cleanup; + + skel->bss->target_pid = getpid(); + skel->bss->root_cg_id = get_cgroup_id(cgs[0].path); + skel->bss->leaf_cg_id = 
get_cgroup_id(cgs[cg_nr - 1].path); + err = iters_css__attach(skel); + + if (!ASSERT_OK(err, "iters_task__attach")) + goto cleanup; + + syscall(SYS_getpgid); + ASSERT_EQ(skel->bss->pre_order_cnt, cg_nr, "pre_order_cnt"); + ASSERT_EQ(skel->bss->first_cg_id, get_cgroup_id(cgs[0].path), "first_cg_id"); + + ASSERT_EQ(skel->bss->post_order_cnt, cg_nr, "post_order_cnt"); + ASSERT_EQ(skel->bss->last_cg_id, get_cgroup_id(cgs[0].path), "last_cg_id"); + ASSERT_EQ(skel->bss->tree_high, cg_nr - 1, "tree_high"); + iters_css__detach(skel); +cleanup: + cleanup_cgroup_environment(); + iters_css__destroy(skel); +} + void test_iters(void) { RUN_TESTS(iters_state_safety); @@ -161,4 +304,11 @@ void test_iters(void) subtest_testmod_seq_iters(); if (test__start_subtest("task_vma")) subtest_task_vma_iters(); + if (test__start_subtest("task")) + subtest_task_iters(); + if (test__start_subtest("css_task")) + subtest_css_task_iters(); + if (test__start_subtest("css")) + subtest_css_iters(); + RUN_TESTS(iters_task_failure); } diff --git a/tools/testing/selftests/bpf/progs/iters_css.c b/tools/testing/selftests/bpf/progs/iters_css.c new file mode 100644 index 000000000000..ec1f6c2f590b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_css.c @@ -0,0 +1,72 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023 Chuyi Zhou */ + +#include "vmlinux.h" +#include +#include +#include "bpf_misc.h" +#include "bpf_experimental.h" + +char _license[] SEC("license") = "GPL"; + +pid_t target_pid; +u64 root_cg_id, leaf_cg_id; +u64 first_cg_id, last_cg_id; + +int pre_order_cnt, post_order_cnt, tree_high; + +struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; +void bpf_cgroup_release(struct cgroup *p) __ksym; +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; + +SEC("fentry.s/" SYS_PREFIX "sys_getpgid") +int iter_css_for_each(const void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct cgroup_subsys_state *root_css, *leaf_css, *pos; + struct cgroup *root_cgrp, *leaf_cgrp, *cur_cgrp; + + if (cur_task->pid != target_pid) + return 0; + + root_cgrp = bpf_cgroup_from_id(root_cg_id); + + if (!root_cgrp) + return 0; + + leaf_cgrp = bpf_cgroup_from_id(leaf_cg_id); + + if (!leaf_cgrp) { + bpf_cgroup_release(root_cgrp); + return 0; + } + root_css = &root_cgrp->self; + leaf_css = &leaf_cgrp->self; + pre_order_cnt = post_order_cnt = tree_high = 0; + first_cg_id = last_cg_id = 0; + + bpf_rcu_read_lock(); + bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) { + cur_cgrp = pos->cgroup; + post_order_cnt++; + last_cg_id = cur_cgrp->kn->id; + } + + bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_PRE) { + cur_cgrp = pos->cgroup; + pre_order_cnt++; + if (!first_cg_id) + first_cg_id = cur_cgrp->kn->id; + } + + bpf_for_each(css, pos, leaf_css, BPF_CGROUP_ITER_ANCESTORS_UP) + tree_high++; + + bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_ANCESTORS_UP) + tree_high--; + bpf_rcu_read_unlock(); + bpf_cgroup_release(root_cgrp); + bpf_cgroup_release(leaf_cgrp); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/iters_css_task.c b/tools/testing/selftests/bpf/progs/iters_css_task.c new file mode 100644 index 000000000000..5089ce384a1c --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_css_task.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023 Chuyi Zhou */ + +#include "vmlinux.h" +#include +#include +#include +#include "bpf_misc.h" +#include "bpf_experimental.h" + +char _license[] SEC("license") = "GPL"; + +struct 
cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; +void bpf_cgroup_release(struct cgroup *p) __ksym; + +pid_t target_pid; +int css_task_cnt; +u64 cg_id; + +SEC("lsm/file_mprotect") +int BPF_PROG(iter_css_task_for_each, struct vm_area_struct *vma, + unsigned long reqprot, unsigned long prot, int ret) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct cgroup_subsys_state *css; + struct task_struct *task; + struct cgroup *cgrp; + + if (cur_task->pid != target_pid) + return ret; + + cgrp = bpf_cgroup_from_id(cg_id); + + if (!cgrp) + return -EPERM; + + css = &cgrp->self; + css_task_cnt = 0; + + bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) + if (task->pid == target_pid) + css_task_cnt++; + + bpf_cgroup_release(cgrp); + + return -EPERM; +} diff --git a/tools/testing/selftests/bpf/progs/iters_task.c b/tools/testing/selftests/bpf/progs/iters_task.c new file mode 100644 index 000000000000..c9b4055cd410 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_task.c @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023 Chuyi Zhou */ + +#include "vmlinux.h" +#include +#include +#include "bpf_misc.h" +#include "bpf_experimental.h" + +char _license[] SEC("license") = "GPL"; + +pid_t target_pid; +int procs_cnt, threads_cnt, proc_threads_cnt; + +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; + +SEC("fentry.s/" SYS_PREFIX "sys_getpgid") +int iter_task_for_each_sleep(void *ctx) +{ + struct task_struct *cur_task = bpf_get_current_task_btf(); + struct task_struct *pos; + + if (cur_task->pid != target_pid) + return 0; + procs_cnt = threads_cnt = proc_threads_cnt = 0; + + bpf_rcu_read_lock(); + bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) + if (pos->pid == target_pid) + procs_cnt++; + + bpf_for_each(task, pos, cur_task, BPF_TASK_ITER_PROC_THREADS) + proc_threads_cnt++; + + bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_THREADS) + if (pos->tgid == target_pid) + threads_cnt++; + bpf_rcu_read_unlock(); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/iters_task_failure.c b/tools/testing/selftests/bpf/progs/iters_task_failure.c new file mode 100644 index 000000000000..c3bf96a67dba --- /dev/null +++ b/tools/testing/selftests/bpf/progs/iters_task_failure.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (C) 2023 Chuyi Zhou */ + +#include "vmlinux.h" +#include +#include +#include "bpf_misc.h" +#include "bpf_experimental.h" + +char _license[] SEC("license") = "GPL"; + +struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; +void bpf_cgroup_release(struct cgroup *p) __ksym; +void bpf_rcu_read_lock(void) __ksym; +void bpf_rcu_read_unlock(void) __ksym; + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("expected an RCU CS when using bpf_iter_task_next") +int BPF_PROG(iter_tasks_without_lock) +{ + struct task_struct *pos; + + bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) { + + } + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("expected an RCU CS when using bpf_iter_css_next") +int BPF_PROG(iter_css_without_lock) +{ + u64 cg_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp = bpf_cgroup_from_id(cg_id); + struct cgroup_subsys_state *root_css, *pos; + + if (!cgrp) + return 0; + root_css = &cgrp->self; + + bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) { + + } + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("expected an RCU CS when using bpf_iter_task_next") +int 
BPF_PROG(iter_tasks_lock_and_unlock) +{ + struct task_struct *pos; + + bpf_rcu_read_lock(); + bpf_for_each(task, pos, NULL, BPF_TASK_ITER_ALL_PROCS) { + bpf_rcu_read_unlock(); + + bpf_rcu_read_lock(); + } + bpf_rcu_read_unlock(); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("expected an RCU CS when using bpf_iter_css_next") +int BPF_PROG(iter_css_lock_and_unlock) +{ + u64 cg_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp = bpf_cgroup_from_id(cg_id); + struct cgroup_subsys_state *root_css, *pos; + + if (!cgrp) + return 0; + root_css = &cgrp->self; + + bpf_rcu_read_lock(); + bpf_for_each(css, pos, root_css, BPF_CGROUP_ITER_DESCENDANTS_POST) { + bpf_rcu_read_unlock(); + + bpf_rcu_read_lock(); + } + bpf_rcu_read_unlock(); + bpf_cgroup_release(cgrp); + return 0; +} + +SEC("?fentry.s/" SYS_PREFIX "sys_getpgid") +__failure __msg("css_task_iter is only allowed in bpf_lsm and bpf iter-s") +int BPF_PROG(iter_css_task_for_each) +{ + u64 cg_id = bpf_get_current_cgroup_id(); + struct cgroup *cgrp = bpf_cgroup_from_id(cg_id); + struct cgroup_subsys_state *css; + struct task_struct *task; + + if (cgrp == NULL) + return 0; + css = &cgrp->self; + + bpf_for_each(css_task, task, css, CSS_TASK_ITER_PROCS) { + + } + bpf_cgroup_release(cgrp); + return 0; +} From da1055b673f3baac2249571c9882ce767a0aa746 Mon Sep 17 00:00:00 2001 From: Kumar Kartikeya Dwivedi Date: Fri, 20 Oct 2023 14:48:39 +0000 Subject: [PATCH 18/51] selftests/bpf: Make linked_list failure test more robust The linked list failure test 'pop_front_off' and 'pop_back_off' currently rely on matching exact instruction and register values. The purpose of the test is to ensure the offset is correctly incremented for the returned pointers from list pop helpers, which can then be used with container_of to obtain the real object. Hence, somehow obtaining the information that the offset is 48 will work for us. Make the test more robust by relying on verifier error string of bpf_spin_lock and remove dependence on fragile instruction index or register number, which can be affected by different clang versions used to build the selftests. 
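As a rough sketch of the layout the test relies on (the field names here are hypothetical; only the offsets are taken from the expected verifier message in the updated test below):

    struct foo {
            /* ~40 bytes of other fields */
            struct bpf_spin_lock lock;  /* at offset 40 */
            struct bpf_list_node node;  /* at offset 48: what the pop helpers return */
    };

Passing the popped node pointer to bpf_spin_lock() then makes the verifier report "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40", which still encodes the offset of interest but is stable across compiler versions, unlike instruction indices and register numbers.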
Fixes: 300f19dcdb99 ("selftests/bpf: Add BPF linked list API tests") Reported-by: Andrii Nakryiko Signed-off-by: Kumar Kartikeya Dwivedi Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20231020144839.2734006-1-memxor@gmail.com --- tools/testing/selftests/bpf/prog_tests/linked_list.c | 10 ++-------- tools/testing/selftests/bpf/progs/linked_list_fail.c | 4 +++- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c index 69dc31383b78..2fb89de63bd2 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_list.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -94,14 +94,8 @@ static struct { { "incorrect_head_var_off2", "variable ptr_ access var_off=(0x0; 0xffffffff) disallowed" }, { "incorrect_head_off1", "bpf_list_head not found at offset=25" }, { "incorrect_head_off2", "bpf_list_head not found at offset=1" }, - { "pop_front_off", - "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) " - "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n" - "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" }, - { "pop_back_off", - "15: (bf) r1 = r6 ; R1_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) " - "R6_w=ptr_or_null_foo(id=4,ref_obj_id=4,off=48,imm=0) refs=2,4\n" - "16: (85) call bpf_this_cpu_ptr#154\nR1 type=ptr_or_null_ expected=percpu_ptr_" }, + { "pop_front_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" }, + { "pop_back_off", "off 48 doesn't point to 'struct bpf_spin_lock' that is at 40" }, }; static void test_linked_list_fail_prog(const char *prog_name, const char *err_msg) diff --git a/tools/testing/selftests/bpf/progs/linked_list_fail.c b/tools/testing/selftests/bpf/progs/linked_list_fail.c index f4c63daba229..6438982b928b 100644 --- a/tools/testing/selftests/bpf/progs/linked_list_fail.c +++ b/tools/testing/selftests/bpf/progs/linked_list_fail.c @@ -591,7 +591,9 @@ int pop_ptr_off(void *(*op)(void *head)) n = op(&p->head); bpf_spin_unlock(&p->lock); - bpf_this_cpu_ptr(n); + if (!n) + return 0; + bpf_spin_lock((void *)n); return 0; } From 394e6869f0185e89cb815db29bf819474df858ae Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 20 Oct 2023 21:31:56 +0800 Subject: [PATCH 19/51] mm/percpu.c: don't acquire pcpu_lock for pcpu_chunk_addr_search() There is no need to acquire pcpu_lock for pcpu_chunk_addr_search(): 1) both pcpu_first_chunk & pcpu_reserved_chunk must have been initialized before the invocation of free_percpu(). 2) The dynamically-created chunk must be valid before the per-cpu pointers allocated from it are freed. So acquire pcpu_lock() after the invocation of pcpu_chunk_addr_search(). 
Acked-by: Dennis Zhou Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20231020133202.4043247-2-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- mm/percpu.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/mm/percpu.c b/mm/percpu.c index a7665de8485f..ea607078368d 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -2267,12 +2267,10 @@ void free_percpu(void __percpu *ptr) kmemleak_free_percpu(ptr); addr = __pcpu_ptr_to_addr(ptr); - - spin_lock_irqsave(&pcpu_lock, flags); - chunk = pcpu_chunk_addr_search(addr); off = addr - chunk->base_addr; + spin_lock_irqsave(&pcpu_lock, flags); size = pcpu_free_area(chunk, off); pcpu_memcg_free_hook(chunk, off, size); From b460bc8302f222d346f0c15bba980eb8c36d6278 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 20 Oct 2023 21:31:57 +0800 Subject: [PATCH 20/51] mm/percpu.c: introduce pcpu_alloc_size() Introduce pcpu_alloc_size() to get the size of the dynamic per-cpu area. It will be used by bpf memory allocator in the following patches. BPF memory allocator maintains per-cpu area caches for multiple area sizes and its free API only has the to-be-freed per-cpu pointer, so it needs the size of dynamic per-cpu area to select the corresponding cache when bpf program frees the dynamic per-cpu pointer. Acked-by: Dennis Zhou Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20231020133202.4043247-3-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- include/linux/percpu.h | 1 + mm/percpu.c | 31 +++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 68fac2e7cbe6..8c677f185901 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -132,6 +132,7 @@ extern void __init setup_per_cpu_areas(void); extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1); extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1); extern void free_percpu(void __percpu *__pdata); +extern size_t pcpu_alloc_size(void __percpu *__pdata); DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T)) diff --git a/mm/percpu.c b/mm/percpu.c index ea607078368d..60ed078e4cd0 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -2244,6 +2244,37 @@ static void pcpu_balance_workfn(struct work_struct *work) mutex_unlock(&pcpu_alloc_mutex); } +/** + * pcpu_alloc_size - the size of the dynamic percpu area + * @ptr: pointer to the dynamic percpu area + * + * Returns the size of the @ptr allocation. This is undefined for statically + * defined percpu variables as there is no corresponding chunk->bound_map. + * + * RETURNS: + * The size of the dynamic percpu area. + * + * CONTEXT: + * Can be called from atomic context. 
+ */ +size_t pcpu_alloc_size(void __percpu *ptr) +{ + struct pcpu_chunk *chunk; + unsigned long bit_off, end; + void *addr; + + if (!ptr) + return 0; + + addr = __pcpu_ptr_to_addr(ptr); + /* No pcpu_lock here: ptr has not been freed, so chunk is still alive */ + chunk = pcpu_chunk_addr_search(addr); + bit_off = (addr - chunk->base_addr) / PCPU_MIN_ALLOC_SIZE; + end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk), + bit_off + 1); + return (end - bit_off) * PCPU_MIN_ALLOC_SIZE; +} + /** * free_percpu - free percpu area * @ptr: pointer to area to free From baa8fdecd87bb8751237b45e3bcb5a179e5a12ca Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 20 Oct 2023 21:31:58 +0800 Subject: [PATCH 21/51] bpf: Re-enable unit_size checking for global per-cpu allocator With pcpu_alloc_size() in place, check whether or not the size of the dynamic per-cpu area matches unit_size. Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20231020133202.4043247-4-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/memalloc.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c index 39ea316c55e7..776bdf5ffd80 100644 --- a/kernel/bpf/memalloc.c +++ b/kernel/bpf/memalloc.c @@ -491,21 +491,17 @@ static int check_obj_size(struct bpf_mem_cache *c, unsigned int idx) struct llist_node *first; unsigned int obj_size; - /* For per-cpu allocator, the size of free objects in free list doesn't * match with unit_size and now there is no way to get the size of * per-cpu pointer saved in free object, so just skip the checking. */ - if (c->percpu_size) - return 0; - first = c->free_llist.first; if (!first) return 0; - obj_size = ksize(first); + if (c->percpu_size) + obj_size = pcpu_alloc_size(((void **)first)[1]); + else + obj_size = ksize(first); if (obj_size != c->unit_size) { - WARN_ONCE(1, "bpf_mem_cache[%u]: unexpected object size %u, expect %u\n", - idx, obj_size, c->unit_size); + WARN_ONCE(1, "bpf_mem_cache[%u]: percpu %d, unexpected object size %u, expect %u\n", + idx, c->percpu_size, obj_size, c->unit_size); return -EINVAL; } return 0; @@ -973,6 +969,12 @@ void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags) return !ret ? NULL : ret + LLIST_NODE_SZ; } +/* The alignment of dynamic per-cpu area is 8, so c->unit_size and the + * actual size of dynamic per-cpu area will always be matched and there is + * no need to adjust size_index for per-cpu allocation. However for the + * simplicity of the implementation, use a unified size_index for both + * kmalloc and per-cpu allocation. + */ static __init int bpf_mem_cache_adjust_size(void) { unsigned int size; From 3f2189e4f77b7a3e979d143dc4ff586488c7e8a5 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 20 Oct 2023 21:31:59 +0800 Subject: [PATCH 22/51] bpf: Use pcpu_alloc_size() in bpf_mem_free{_rcu}() For bpf_global_percpu_ma, the pointer passed to bpf_mem_free_rcu() is allocated by kmalloc() and its size is fixed (16-bytes on x86-64). So no matter which cache allocates the dynamic per-cpu area, on x86-64 cache[2] will always be used to free the per-cpu area. Fix the imbalance by checking whether the bpf memory allocator is per-cpu or not and using pcpu_alloc_size() instead of ksize() to find the correct cache for per-cpu free.
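As a rough sketch of why ksize() picks the wrong cache (the struct name is hypothetical, not an actual kernel definition; sizes assume x86-64 as above): the object handed to the free path is only a small kmalloc'ed stub holding the per-cpu pointer, not the per-cpu area itself:

    struct percpu_stub {
            struct llist_node node;    /* LLIST_NODE_SZ bytes */
            void __percpu *pptr;       /* the actual dynamic per-cpu area */
    };

ksize() of the stub is always 16 bytes no matter how large the per-cpu area is, so every free would land in cache[2]; deriving the index from pcpu_alloc_size(stub->pptr) instead selects the cache that actually allocated the area.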
Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20231020133202.4043247-5-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf_mem_alloc.h | 1 + kernel/bpf/memalloc.c | 16 ++++++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h index d644bbb298af..bb1223b21308 100644 --- a/include/linux/bpf_mem_alloc.h +++ b/include/linux/bpf_mem_alloc.h @@ -11,6 +11,7 @@ struct bpf_mem_caches; struct bpf_mem_alloc { struct bpf_mem_caches __percpu *caches; struct bpf_mem_cache __percpu *cache; + bool percpu; struct work_struct work; }; diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c index 776bdf5ffd80..5308e386380a 100644 --- a/kernel/bpf/memalloc.c +++ b/kernel/bpf/memalloc.c @@ -525,6 +525,7 @@ int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu) /* room for llist_node and per-cpu pointer */ if (percpu) percpu_size = LLIST_NODE_SZ + sizeof(void *); + ma->percpu = percpu; if (size) { pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL); @@ -874,6 +875,17 @@ void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size) return !ret ? NULL : ret + LLIST_NODE_SZ; } +static notrace int bpf_mem_free_idx(void *ptr, bool percpu) +{ + size_t size; + + if (percpu) + size = pcpu_alloc_size(*((void **)ptr)); + else + size = ksize(ptr - LLIST_NODE_SZ); + return bpf_mem_cache_idx(size); +} + void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr) { int idx; @@ -881,7 +893,7 @@ void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr) if (!ptr) return; - idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ)); + idx = bpf_mem_free_idx(ptr, ma->percpu); if (idx < 0) return; @@ -895,7 +907,7 @@ void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr) if (!ptr) return; - idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ)); + idx = bpf_mem_free_idx(ptr, ma->percpu); if (idx < 0) return; From e581a3461de3f129cfe888a67d9f31086328271f Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 20 Oct 2023 21:32:00 +0800 Subject: [PATCH 23/51] bpf: Move the declaration of __bpf_obj_drop_impl() to bpf.h both syscall.c and helpers.c have the declaration of __bpf_obj_drop_impl(), so just move it to a common header file. 
Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20231020133202.4043247-6-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 1 + kernel/bpf/helpers.c | 2 -- kernel/bpf/syscall.c | 2 -- 3 files changed, 1 insertion(+), 4 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index b4b40b45962b..ebd412179771 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2058,6 +2058,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec); bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b); void bpf_obj_free_timer(const struct btf_record *rec, void *obj); void bpf_obj_free_fields(const struct btf_record *rec, void *obj); +void __bpf_obj_drop_impl(void *p, const struct btf_record *rec); struct bpf_map *bpf_map_get(u32 ufd); struct bpf_map *bpf_map_get_with_uref(u32 ufd); diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index da058aead20c..c814bb44d2d1 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1811,8 +1811,6 @@ bpf_base_func_proto(enum bpf_func_id func_id) } } -void __bpf_obj_drop_impl(void *p, const struct btf_record *rec); - void bpf_list_head_free(const struct btf_field *field, void *list_head, struct bpf_spin_lock *spin_lock) { diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 341f8cb4405c..69998f84f7c8 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -626,8 +626,6 @@ void bpf_obj_free_timer(const struct btf_record *rec, void *obj) bpf_timer_cancel_and_free(obj + rec->timer_off); } -extern void __bpf_obj_drop_impl(void *p, const struct btf_record *rec); - void bpf_obj_free_fields(const struct btf_record *rec, void *obj) { const struct btf_field *fields; From e383a45902337356d9ccad797094a27c6b2150f9 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 20 Oct 2023 21:32:01 +0800 Subject: [PATCH 24/51] bpf: Use bpf_global_percpu_ma for per-cpu kptr in __bpf_obj_drop_impl() The following warning was reported when running "./test_progs -t test_bpf_ma/percpu_free_through_map_free": ------------[ cut here ]------------ WARNING: CPU: 1 PID: 68 at kernel/bpf/memalloc.c:342 CPU: 1 PID: 68 Comm: kworker/u16:2 Not tainted 6.6.0-rc2+ #222 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996) Workqueue: events_unbound bpf_map_free_deferred RIP: 0010:bpf_mem_refill+0x21c/0x2a0 ...... Call Trace: ? bpf_mem_refill+0x21c/0x2a0 irq_work_single+0x27/0x70 irq_work_run_list+0x2a/0x40 irq_work_run+0x18/0x40 __sysvec_irq_work+0x1c/0xc0 sysvec_irq_work+0x73/0x90 asm_sysvec_irq_work+0x1b/0x20 RIP: 0010:unit_free+0x50/0x80 ...... bpf_mem_free+0x46/0x60 __bpf_obj_drop_impl+0x40/0x90 bpf_obj_free_fields+0x17d/0x1a0 array_map_free+0x6b/0x170 bpf_map_free_deferred+0x54/0xa0 process_scheduled_works+0xba/0x370 worker_thread+0x16d/0x2e0 kthread+0x105/0x140 ret_from_fork+0x39/0x60 ret_from_fork_asm+0x1b/0x30 ---[ end trace 0000000000000000 ]--- The reason is simple: __bpf_obj_drop_impl() does not know the freeing field is a per-cpu pointer and it uses bpf_global_ma to free the pointer. Because bpf_global_ma is not a per-cpu allocator, ksize() is used to select the corresponding cache. The bpf_mem_cache with 16-bytes unit_size will always be selected to do the unmatched free and it will trigger the warning in free_bulk() eventually. Because per-cpu kptr doesn't support list or rb-tree now, fix the problem by only checking whether or not the type of kptr is per-cpu in bpf_obj_free_fields(), and using bpf_global_percpu_ma to free these kptrs.
Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20231020133202.4043247-7-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf.h | 2 +- kernel/bpf/helpers.c | 24 +++++++++++++++--------- kernel/bpf/syscall.c | 4 ++-- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index ebd412179771..b4825d3cdb29 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -2058,7 +2058,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec); bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b); void bpf_obj_free_timer(const struct btf_record *rec, void *obj); void bpf_obj_free_fields(const struct btf_record *rec, void *obj); -void __bpf_obj_drop_impl(void *p, const struct btf_record *rec); +void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu); struct bpf_map *bpf_map_get(u32 ufd); struct bpf_map *bpf_map_get_with_uref(u32 ufd); diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index c814bb44d2d1..e46ac288a108 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1842,7 +1842,7 @@ unlock: * bpf_list_head which needs to be freed. */ migrate_disable(); - __bpf_obj_drop_impl(obj, field->graph_root.value_rec); + __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); migrate_enable(); } } @@ -1881,7 +1881,7 @@ void bpf_rb_root_free(const struct btf_field *field, void *rb_root, migrate_disable(); - __bpf_obj_drop_impl(obj, field->graph_root.value_rec); + __bpf_obj_drop_impl(obj, field->graph_root.value_rec, false); migrate_enable(); } } @@ -1913,8 +1913,10 @@ __bpf_kfunc void *bpf_percpu_obj_new_impl(u64 local_type_id__k, void *meta__ign) } /* Must be called under migrate_disable(), as required by bpf_mem_free */ -void __bpf_obj_drop_impl(void *p, const struct btf_record *rec) +void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu) { + struct bpf_mem_alloc *ma; + if (rec && rec->refcount_off >= 0 && !refcount_dec_and_test((refcount_t *)(p + rec->refcount_off))) { /* Object is refcounted and refcount_dec didn't result in 0 @@ -1926,10 +1928,14 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec) if (rec) bpf_obj_free_fields(rec, p); - if (rec && rec->refcount_off >= 0) - bpf_mem_free_rcu(&bpf_global_ma, p); + if (percpu) + ma = &bpf_global_percpu_ma; else - bpf_mem_free(&bpf_global_ma, p); + ma = &bpf_global_ma; + if (rec && rec->refcount_off >= 0) + bpf_mem_free_rcu(ma, p); + else + bpf_mem_free(ma, p); } __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) @@ -1937,7 +1943,7 @@ __bpf_kfunc void bpf_obj_drop_impl(void *p__alloc, void *meta__ign) struct btf_struct_meta *meta = meta__ign; void *p = p__alloc; - __bpf_obj_drop_impl(p, meta ? meta->record : NULL); + __bpf_obj_drop_impl(p, meta ? 
meta->record : NULL, false); } __bpf_kfunc void bpf_percpu_obj_drop_impl(void *p__alloc, void *meta__ign) @@ -1981,7 +1987,7 @@ static int __bpf_list_add(struct bpf_list_node_kern *node, */ if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { /* Only called from BPF prog, no need to migrate_disable */ - __bpf_obj_drop_impl((void *)n - off, rec); + __bpf_obj_drop_impl((void *)n - off, rec, false); return -EINVAL; } @@ -2080,7 +2086,7 @@ static int __bpf_rbtree_add(struct bpf_rb_root *root, */ if (cmpxchg(&node->owner, NULL, BPF_PTR_POISON)) { /* Only called from BPF prog, no need to migrate_disable */ - __bpf_obj_drop_impl((void *)n - off, rec); + __bpf_obj_drop_impl((void *)n - off, rec, false); return -EINVAL; } diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 69998f84f7c8..a9b2cb500bf7 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -660,8 +660,8 @@ void bpf_obj_free_fields(const struct btf_record *rec, void *obj) field->kptr.btf_id); migrate_disable(); __bpf_obj_drop_impl(xchgd_field, pointee_struct_meta ? - pointee_struct_meta->record : - NULL); + pointee_struct_meta->record : NULL, + fields[i].type == BPF_KPTR_PERCPU); migrate_enable(); } else { field->kptr.dtor(xchgd_field); From d440ba91ca4d3004709876779d40713a99e21f6a Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Fri, 20 Oct 2023 21:32:02 +0800 Subject: [PATCH 25/51] selftests/bpf: Add more test cases for bpf memory allocator Add the following 3 test cases for bpf memory allocator: 1) Do allocation in bpf program and free through map free 2) Do batch per-cpu allocation and per-cpu free in bpf program 3) Do per-cpu allocation in bpf program and free through map free For per-cpu allocation, because the per-cpu allocator sometimes cannot refill in time, tests 2) and 3) consider it OK for bpf_percpu_obj_new_impl() to return NULL.
Signed-off-by: Hou Tao Link: https://lore.kernel.org/r/20231020133202.4043247-8-houtao@huaweicloud.com Signed-off-by: Alexei Starovoitov --- .../selftests/bpf/prog_tests/test_bpf_ma.c | 20 +- .../testing/selftests/bpf/progs/test_bpf_ma.c | 180 +++++++++++++++++- 2 files changed, 193 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c b/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c index 0cca4e8ae38e..d3491a84b3b9 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c +++ b/tools/testing/selftests/bpf/prog_tests/test_bpf_ma.c @@ -9,9 +9,10 @@ #include "test_bpf_ma.skel.h" -void test_test_bpf_ma(void) +static void do_bpf_ma_test(const char *name) { struct test_bpf_ma *skel; + struct bpf_program *prog; struct btf *btf; int i, err; @@ -34,6 +35,11 @@ void test_test_bpf_ma(void) skel->rodata->data_btf_ids[i] = id; } + prog = bpf_object__find_program_by_name(skel->obj, name); + if (!ASSERT_OK_PTR(prog, "invalid prog name")) + goto out; + bpf_program__set_autoload(prog, true); + err = test_bpf_ma__load(skel); if (!ASSERT_OK(err, "load")) goto out; @@ -48,3 +54,15 @@ void test_test_bpf_ma(void) out: test_bpf_ma__destroy(skel); } + +void test_test_bpf_ma(void) +{ + if (test__start_subtest("batch_alloc_free")) + do_bpf_ma_test("test_batch_alloc_free"); + if (test__start_subtest("free_through_map_free")) + do_bpf_ma_test("test_free_through_map_free"); + if (test__start_subtest("batch_percpu_alloc_free")) + do_bpf_ma_test("test_batch_percpu_alloc_free"); + if (test__start_subtest("percpu_free_through_map_free")) + do_bpf_ma_test("test_percpu_free_through_map_free"); +} diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c index ecde41ae0fc8..b685a4aba6bd 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_ma.c +++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c @@ -37,10 +37,20 @@ int pid = 0; __type(key, int); \ __type(value, struct map_value_##_size); \ __uint(max_entries, 128); \ - } array_##_size SEC(".maps"); + } array_##_size SEC(".maps") -static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int batch, - unsigned int idx) +#define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \ + struct map_value_percpu_##_size { \ + struct bin_data_##_size __percpu_kptr * data; \ + }; \ + struct { \ + __uint(type, BPF_MAP_TYPE_ARRAY); \ + __type(key, int); \ + __type(value, struct map_value_percpu_##_size); \ + __uint(max_entries, 128); \ + } array_percpu_##_size SEC(".maps") + +static __always_inline void batch_alloc(struct bpf_map *map, unsigned int batch, unsigned int idx) { struct generic_map_value *value; unsigned int i, key; @@ -65,6 +75,14 @@ static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int b return; } } +} + +static __always_inline void batch_free(struct bpf_map *map, unsigned int batch, unsigned int idx) +{ + struct generic_map_value *value; + unsigned int i, key; + void *old; + for (i = 0; i < batch; i++) { key = i; value = bpf_map_lookup_elem(map, &key); @@ -81,8 +99,72 @@ static __always_inline void batch_alloc_free(struct bpf_map *map, unsigned int b } } +static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int batch, + unsigned int idx) +{ + struct generic_map_value *value; + unsigned int i, key; + void *old, *new; + + for (i = 0; i < batch; i++) { + key = i; + value = bpf_map_lookup_elem(map, &key); + if (!value) { + err = 1; + return; + } + /* per-cpu allocator may not be able to refill in time */ + new = 
bpf_percpu_obj_new_impl(data_btf_ids[idx], NULL); + if (!new) + continue; + + old = bpf_kptr_xchg(&value->data, new); + if (old) { + bpf_percpu_obj_drop(old); + err = 2; + return; + } + } +} + +static __always_inline void batch_percpu_free(struct bpf_map *map, unsigned int batch, + unsigned int idx) +{ + struct generic_map_value *value; + unsigned int i, key; + void *old; + + for (i = 0; i < batch; i++) { + key = i; + value = bpf_map_lookup_elem(map, &key); + if (!value) { + err = 3; + return; + } + old = bpf_kptr_xchg(&value->data, NULL); + if (!old) + continue; + bpf_percpu_obj_drop(old); + } +} + +#define CALL_BATCH_ALLOC(size, batch, idx) \ + batch_alloc((struct bpf_map *)(&array_##size), batch, idx) + #define CALL_BATCH_ALLOC_FREE(size, batch, idx) \ - batch_alloc_free((struct bpf_map *)(&array_##size), batch, idx) + do { \ + batch_alloc((struct bpf_map *)(&array_##size), batch, idx); \ + batch_free((struct bpf_map *)(&array_##size), batch, idx); \ + } while (0) + +#define CALL_BATCH_PERCPU_ALLOC(size, batch, idx) \ + batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx) + +#define CALL_BATCH_PERCPU_ALLOC_FREE(size, batch, idx) \ + do { \ + batch_percpu_alloc((struct bpf_map *)(&array_percpu_##size), batch, idx); \ + batch_percpu_free((struct bpf_map *)(&array_percpu_##size), batch, idx); \ + } while (0) DEFINE_ARRAY_WITH_KPTR(8); DEFINE_ARRAY_WITH_KPTR(16); @@ -97,8 +179,21 @@ DEFINE_ARRAY_WITH_KPTR(1024); DEFINE_ARRAY_WITH_KPTR(2048); DEFINE_ARRAY_WITH_KPTR(4096); -SEC("fentry/" SYS_PREFIX "sys_nanosleep") -int test_bpf_mem_alloc_free(void *ctx) +/* per-cpu kptr doesn't support bin_data_8 which is a zero-sized array */ +DEFINE_ARRAY_WITH_PERCPU_KPTR(16); +DEFINE_ARRAY_WITH_PERCPU_KPTR(32); +DEFINE_ARRAY_WITH_PERCPU_KPTR(64); +DEFINE_ARRAY_WITH_PERCPU_KPTR(96); +DEFINE_ARRAY_WITH_PERCPU_KPTR(128); +DEFINE_ARRAY_WITH_PERCPU_KPTR(192); +DEFINE_ARRAY_WITH_PERCPU_KPTR(256); +DEFINE_ARRAY_WITH_PERCPU_KPTR(512); +DEFINE_ARRAY_WITH_PERCPU_KPTR(1024); +DEFINE_ARRAY_WITH_PERCPU_KPTR(2048); +DEFINE_ARRAY_WITH_PERCPU_KPTR(4096); + +SEC("?fentry/" SYS_PREFIX "sys_nanosleep") +int test_batch_alloc_free(void *ctx) { if ((u32)bpf_get_current_pid_tgid() != pid) return 0; @@ -121,3 +216,76 @@ int test_bpf_mem_alloc_free(void *ctx) return 0; } + +SEC("?fentry/" SYS_PREFIX "sys_nanosleep") +int test_free_through_map_free(void *ctx) +{ + if ((u32)bpf_get_current_pid_tgid() != pid) + return 0; + + /* Alloc 128 8-bytes objects in batch to trigger refilling, + * then free these objects through map free. + */ + CALL_BATCH_ALLOC(8, 128, 0); + CALL_BATCH_ALLOC(16, 128, 1); + CALL_BATCH_ALLOC(32, 128, 2); + CALL_BATCH_ALLOC(64, 128, 3); + CALL_BATCH_ALLOC(96, 128, 4); + CALL_BATCH_ALLOC(128, 128, 5); + CALL_BATCH_ALLOC(192, 128, 6); + CALL_BATCH_ALLOC(256, 128, 7); + CALL_BATCH_ALLOC(512, 64, 8); + CALL_BATCH_ALLOC(1024, 32, 9); + CALL_BATCH_ALLOC(2048, 16, 10); + CALL_BATCH_ALLOC(4096, 8, 11); + + return 0; +} + +SEC("?fentry/" SYS_PREFIX "sys_nanosleep") +int test_batch_percpu_alloc_free(void *ctx) +{ + if ((u32)bpf_get_current_pid_tgid() != pid) + return 0; + + /* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling, + * then free 128 16-bytes per-cpu objects in batch to trigger freeing. 
+ */ + CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1); + CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2); + CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3); + CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4); + CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5); + CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6); + CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7); + CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8); + CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 9); + CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 10); + CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 11); + + return 0; +} + +SEC("?fentry/" SYS_PREFIX "sys_nanosleep") +int test_percpu_free_through_map_free(void *ctx) +{ + if ((u32)bpf_get_current_pid_tgid() != pid) + return 0; + + /* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling, + * then free these object through map free. + */ + CALL_BATCH_PERCPU_ALLOC(16, 128, 1); + CALL_BATCH_PERCPU_ALLOC(32, 128, 2); + CALL_BATCH_PERCPU_ALLOC(64, 128, 3); + CALL_BATCH_PERCPU_ALLOC(96, 128, 4); + CALL_BATCH_PERCPU_ALLOC(128, 128, 5); + CALL_BATCH_PERCPU_ALLOC(192, 128, 6); + CALL_BATCH_PERCPU_ALLOC(256, 128, 7); + CALL_BATCH_PERCPU_ALLOC(512, 64, 8); + CALL_BATCH_PERCPU_ALLOC(1024, 32, 9); + CALL_BATCH_PERCPU_ALLOC(2048, 16, 10); + CALL_BATCH_PERCPU_ALLOC(4096, 8, 11); + + return 0; +} From 69a19170303ff2f802049be94cfcf62f714002a3 Mon Sep 17 00:00:00 2001 From: Denys Zagorui Date: Thu, 19 Oct 2023 04:35:21 -0700 Subject: [PATCH 26/51] samples: bpf: Fix syscall_tp openat argument This modification doesn't change behaviour of the syscall_tp But such code is often used as a reference so it should be correct anyway Signed-off-by: Denys Zagorui Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20231019113521.4103825-1-dzagorui@cisco.com --- samples/bpf/syscall_tp_kern.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/samples/bpf/syscall_tp_kern.c b/samples/bpf/syscall_tp_kern.c index 090fecfe641a..58fef969a60e 100644 --- a/samples/bpf/syscall_tp_kern.c +++ b/samples/bpf/syscall_tp_kern.c @@ -4,6 +4,7 @@ #include #include +#if !defined(__aarch64__) struct syscalls_enter_open_args { unsigned long long unused; long syscall_nr; @@ -11,6 +12,7 @@ struct syscalls_enter_open_args { long flags; long mode; }; +#endif struct syscalls_exit_open_args { unsigned long long unused; @@ -18,6 +20,15 @@ struct syscalls_exit_open_args { long ret; }; +struct syscalls_enter_open_at_args { + unsigned long long unused; + long syscall_nr; + long long dfd; + long filename_ptr; + long flags; + long mode; +}; + struct { __uint(type, BPF_MAP_TYPE_ARRAY); __type(key, u32); @@ -54,14 +65,14 @@ int trace_enter_open(struct syscalls_enter_open_args *ctx) #endif SEC("tracepoint/syscalls/sys_enter_openat") -int trace_enter_open_at(struct syscalls_enter_open_args *ctx) +int trace_enter_open_at(struct syscalls_enter_open_at_args *ctx) { count(&enter_open_map); return 0; } SEC("tracepoint/syscalls/sys_enter_openat2") -int trace_enter_open_at2(struct syscalls_enter_open_args *ctx) +int trace_enter_open_at2(struct syscalls_enter_open_at_args *ctx) { count(&enter_open_map); return 0; From b63dadd6f97522513fe9497b5fde84a154e39a0b Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Mon, 23 Oct 2023 20:50:15 +0200 Subject: [PATCH 27/51] bpf, tcx: Get rid of tcx_link_const Small clean up to get rid of the extra tcx_link_const() and only retain the tcx_link(). 
Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/r/20231023185015.21152-1-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- include/net/tcx.h | 7 +------ kernel/bpf/tcx.c | 4 ++-- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/include/net/tcx.h b/include/net/tcx.h index 264f147953ba..04be9377785d 100644 --- a/include/net/tcx.h +++ b/include/net/tcx.h @@ -38,16 +38,11 @@ static inline struct tcx_entry *tcx_entry(struct bpf_mprog_entry *entry) return container_of(bundle, struct tcx_entry, bundle); } -static inline struct tcx_link *tcx_link(struct bpf_link *link) +static inline struct tcx_link *tcx_link(const struct bpf_link *link) { return container_of(link, struct tcx_link, link); } -static inline const struct tcx_link *tcx_link_const(const struct bpf_link *link) -{ - return tcx_link((struct bpf_link *)link); -} - void tcx_inc(void); void tcx_dec(void); diff --git a/kernel/bpf/tcx.c b/kernel/bpf/tcx.c index 1338a13a8b64..2e4885e7781f 100644 --- a/kernel/bpf/tcx.c +++ b/kernel/bpf/tcx.c @@ -250,7 +250,7 @@ static void tcx_link_dealloc(struct bpf_link *link) static void tcx_link_fdinfo(const struct bpf_link *link, struct seq_file *seq) { - const struct tcx_link *tcx = tcx_link_const(link); + const struct tcx_link *tcx = tcx_link(link); u32 ifindex = 0; rtnl_lock(); @@ -267,7 +267,7 @@ static void tcx_link_fdinfo(const struct bpf_link *link, struct seq_file *seq) static int tcx_link_fill_info(const struct bpf_link *link, struct bpf_link_info *info) { - const struct tcx_link *tcx = tcx_link_const(link); + const struct tcx_link *tcx = tcx_link(link); u32 ifindex = 0; rtnl_lock(); From 3c4e420cb6536026ddd50eaaff5f30e4f144200d Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 24 Oct 2023 03:09:11 +0300 Subject: [PATCH 28/51] bpf: move explored_state() closer to the beginning of verifier.c Subsequent patches would make use of explored_state() function. Move it up to avoid adding unnecessary prototype. 
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231024000917.12153-2-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 28 +++++++++++++--------------- 1 file changed, 13 insertions(+), 15 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e9bc5d4a25a1..e6232b5d3964 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1817,6 +1817,19 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, return 0; } +static u32 state_htab_size(struct bpf_verifier_env *env) +{ + return env->prog->len; +} + +static struct bpf_verifier_state_list **explored_state(struct bpf_verifier_env *env, int idx) +{ + struct bpf_verifier_state *cur = env->cur_state; + struct bpf_func_state *state = cur->frame[cur->curframe]; + + return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; +} + static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) { while (st) { @@ -15020,21 +15033,6 @@ enum { BRANCH = 2, }; -static u32 state_htab_size(struct bpf_verifier_env *env) -{ - return env->prog->len; -} - -static struct bpf_verifier_state_list **explored_state( - struct bpf_verifier_env *env, - int idx) -{ - struct bpf_verifier_state *cur = env->cur_state; - struct bpf_func_state *state = cur->frame[cur->curframe]; - - return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; -} - static void mark_prune_point(struct bpf_verifier_env *env, int idx) { env->insn_aux_data[idx].prune_point = true; From 4c97259abc9bc8df7712f76f58ce385581876857 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 24 Oct 2023 03:09:12 +0300 Subject: [PATCH 29/51] bpf: extract same_callsites() as utility function Extract same_callsites() from clean_live_states() as a utility function. This function would be used by the next patch in the set. 
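Its intended call pattern, condensed from how the following patch applies it in clean_live_states(), is a guard of roughly this shape:

    if (sl->state.insn_idx == insn && same_callsites(&sl->state, cur))
            /* checkpoint is comparable with cur, clean it */
            clean_verifier_state(env, &sl->state);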
Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231024000917.12153-3-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 20 +++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e6232b5d3964..366029e484a0 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1830,6 +1830,20 @@ static struct bpf_verifier_state_list **explored_state(struct bpf_verifier_env * return &env->explored_states[(idx ^ state->callsite) % state_htab_size(env)]; } +static bool same_callsites(struct bpf_verifier_state *a, struct bpf_verifier_state *b) +{ + int fr; + + if (a->curframe != b->curframe) + return false; + + for (fr = a->curframe; fr >= 0; fr--) + if (a->frame[fr]->callsite != b->frame[fr]->callsite) + return false; + + return true; +} + static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) { while (st) { @@ -15909,18 +15923,14 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn, struct bpf_verifier_state *cur) { struct bpf_verifier_state_list *sl; - int i; sl = *explored_state(env, insn); while (sl) { if (sl->state.branches) goto next; if (sl->state.insn_idx != insn || - sl->state.curframe != cur->curframe) + !same_callsites(&sl->state, cur)) goto next; - for (i = 0; i <= cur->curframe; i++) - if (sl->state.frame[i]->callsite != cur->frame[i]->callsite) - goto next; clean_verifier_state(env, &sl->state); next: sl = sl->next; From 2793a8b015f7f1caadb9bce9c63dc659f7522676 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 24 Oct 2023 03:09:13 +0300 Subject: [PATCH 30/51] bpf: exact states comparison for iterator convergence checks Convergence for open coded iterators is computed in is_state_visited() by examining states with branches count > 1 and using states_equal(). states_equal() computes sub-state relation using read and precision marks. Read and precision marks are propagated from children states, thus are not guaranteed to be complete inside a loop when branches count > 1. This could be demonstrated using the following unsafe program: 1. r7 = -16 2. r6 = bpf_get_prandom_u32() 3. while (bpf_iter_num_next(&fp[-8])) { 4. if (r6 != 42) { 5. r7 = -32 6. r6 = bpf_get_prandom_u32() 7. continue 8. } 9. r0 = r10 10. r0 += r7 11. r8 = *(u64 *)(r0 + 0) 12. r6 = bpf_get_prandom_u32() 13. } Here the verifier would first visit path 1-3, create a checkpoint at 3 with r7=-16, continue to 4-7,3 with r7=-32. Because instructions at 9-12 had not been visited yet, the existing checkpoint at 3 does not have a read or precision mark for r7. Thus states_equal() would return true and the verifier would discard the current state, so the unsafe memory access at 11 would not be caught. This commit fixes this loophole by introducing exact state comparisons for iterator convergence logic: - registers are compared using regs_exact() regardless of read or precision marks; - stack slots have to have identical type. Unfortunately, this is too strict even for simple programs like the one below: i = 0; while(iter_next(&it)) i++; At each iteration step i++ would produce a new distinct state and eventually the instruction processing limit would be reached. To avoid such behavior, speculatively forget (widen) the range for imprecise scalar registers, if those registers were not precise at the end of the previous iteration and do not match exactly.
This is a conservative heuristic that allows a wide range of programs to be verified; however, it precludes verification of programs that conjure an imprecise value on the first loop iteration and use it as precise on the second. Test case iter_task_vma_for_each() presents one such case: unsigned int seen = 0; ... bpf_for_each(task_vma, vma, task, 0) { if (seen >= 1000) break; ... seen++; } Here clang generates the following code: : 24: r8 = r6 ; stash current value of ... body ... 'seen' 29: r1 = r10 30: r1 += -0x8 31: call bpf_iter_task_vma_next 32: r6 += 0x1 ; seen++; 33: if r0 == 0x0 goto +0x2 ; exit on next() == NULL 34: r7 += 0x10 35: if r8 < 0x3e7 goto -0xc ; loop on seen < 1000 : ... exit ... Note that the counter in r6 is copied to r8 and then incremented, while the conditional jump is done using r8. Because of this, the precision mark for r6 lags one state behind the precision mark on r8 and the widening logic kicks in. Adding barrier_var(seen) after the conditional is sufficient to force clang to use the same register for both counting and the conditional jump. This issue was discussed in the thread [1] which was started by Andrew Werner demonstrating a similar bug in callback function handling. Callbacks will be addressed in a follow-up patch. [1] https://lore.kernel.org/bpf/97a90da09404c65c8e810cf83c94ac703705dc0e.camel@gmail.com/ Co-developed-by: Andrii Nakryiko Co-developed-by: Alexei Starovoitov Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231024000917.12153-4-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 1 + kernel/bpf/verifier.c | 218 +++++++++++++++--- .../selftests/bpf/progs/iters_task_vma.c | 1 + 3 files changed, 189 insertions(+), 31 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e67cd45a85be..38b788228594 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -384,6 +384,7 @@ struct bpf_verifier_state { */ struct bpf_idx_pair *jmp_history; u32 jmp_history_cnt; + u32 dfs_depth; }; #define bpf_get_spilled_reg(slot, frame, mask) \ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 366029e484a0..6573e8c77329 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1802,6 +1802,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, dst_state->parent = src->parent; dst_state->first_insn_idx = src->first_insn_idx; dst_state->last_insn_idx = src->last_insn_idx; + dst_state->dfs_depth = src->dfs_depth; for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; if (!dst) { @@ -7723,6 +7724,81 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id return 0; } +/* Look for a previous loop entry at insn_idx: nearest parent state + * stopped at insn_idx with callsites matching those in cur->frame. + */ +static struct bpf_verifier_state *find_prev_entry(struct bpf_verifier_env *env, + struct bpf_verifier_state *cur, + int insn_idx) +{ + struct bpf_verifier_state_list *sl; + struct bpf_verifier_state *st; + + /* Explored states are pushed in stack order, most recent states come first */ + sl = *explored_state(env, insn_idx); + for (; sl; sl = sl->next) { + /* If st->branches != 0 state is a part of current DFS verification path, + * hence cur & st form a loop.
+ */ + st = &sl->state; + if (st->insn_idx == insn_idx && st->branches && same_callsites(st, cur) && + st->dfs_depth < cur->dfs_depth) + return st; + } + + return NULL; +} + +static void reset_idmap_scratch(struct bpf_verifier_env *env); +static bool regs_exact(const struct bpf_reg_state *rold, + const struct bpf_reg_state *rcur, + struct bpf_idmap *idmap); + +static void maybe_widen_reg(struct bpf_verifier_env *env, + struct bpf_reg_state *rold, struct bpf_reg_state *rcur, + struct bpf_idmap *idmap) +{ + if (rold->type != SCALAR_VALUE) + return; + if (rold->type != rcur->type) + return; + if (rold->precise || rcur->precise || regs_exact(rold, rcur, idmap)) + return; + __mark_reg_unknown(env, rcur); +} + +static int widen_imprecise_scalars(struct bpf_verifier_env *env, + struct bpf_verifier_state *old, + struct bpf_verifier_state *cur) +{ + struct bpf_func_state *fold, *fcur; + int i, fr; + + reset_idmap_scratch(env); + for (fr = old->curframe; fr >= 0; fr--) { + fold = old->frame[fr]; + fcur = cur->frame[fr]; + + for (i = 0; i < MAX_BPF_REG; i++) + maybe_widen_reg(env, + &fold->regs[i], + &fcur->regs[i], + &env->idmap_scratch); + + for (i = 0; i < fold->allocated_stack / BPF_REG_SIZE; i++) { + if (!is_spilled_reg(&fold->stack[i]) || + !is_spilled_reg(&fcur->stack[i])) + continue; + + maybe_widen_reg(env, + &fold->stack[i].spilled_ptr, + &fcur->stack[i].spilled_ptr, + &env->idmap_scratch); + } + } + return 0; +} + /* process_iter_next_call() is called when verifier gets to iterator's next * "method" (e.g., bpf_iter_num_next() for numbers iterator) call. We'll refer * to it as just "iter_next()" in comments below. @@ -7764,25 +7840,47 @@ static int process_iter_arg(struct bpf_verifier_env *env, int regno, int insn_id * is some statically known limit on number of iterations (e.g., if there is * an explicit `if n > 100 then break;` statement somewhere in the loop). * - * One very subtle but very important aspect is that we *always* simulate NULL - * condition first (as the current state) before we simulate non-NULL case. - * This has to do with intricacies of scalar precision tracking. By simulating - * "exit condition" of iter_next() returning NULL first, we make sure all the - * relevant precision marks *that will be set **after** we exit iterator loop* - * are propagated backwards to common parent state of NULL and non-NULL - * branches. Thanks to that, state equivalence checks done later in forked - * state, when reaching iter_next() for ACTIVE iterator, can assume that - * precision marks are finalized and won't change. Because simulating another - * ACTIVE iterator iteration won't change them (because given same input - * states we'll end up with exactly same output states which we are currently - * comparing; and verification after the loop already propagated back what - * needs to be **additionally** tracked as precise). It's subtle, grok - * precision tracking for more intuitive understanding. + * Iteration convergence logic in is_state_visited() relies on exact + * states comparison, which ignores read and precision marks. + * This is necessary because read and precision marks are not finalized + * while in the loop. Exact comparison might preclude convergence for + * simple programs like below: + * + * i = 0; + * while(iter_next(&it)) + * i++; + * + * At each iteration step i++ would produce a new distinct state and + * eventually instruction processing limit would be reached. 
+ * + * To avoid such behavior speculatively forget (widen) range for + * imprecise scalar registers, if those registers were not precise at the + * end of the previous iteration and do not match exactly. + * + * This is a conservative heuristic that allows to verify wide range of programs, + * however it precludes verification of programs that conjure an + * imprecise value on the first loop iteration and use it as precise on a second. + * For example, the following safe program would fail to verify: + * + * struct bpf_num_iter it; + * int arr[10]; + * int i = 0, a = 0; + * bpf_iter_num_new(&it, 0, 10); + * while (bpf_iter_num_next(&it)) { + * if (a == 0) { + * a = 1; + * i = 7; // Because i changed verifier would forget + * // it's range on second loop entry. + * } else { + * arr[i] = 42; // This would fail to verify. + * } + * } + * bpf_iter_num_destroy(&it); */ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx, struct bpf_kfunc_call_arg_meta *meta) { - struct bpf_verifier_state *cur_st = env->cur_state, *queued_st; + struct bpf_verifier_state *cur_st = env->cur_state, *queued_st, *prev_st; struct bpf_func_state *cur_fr = cur_st->frame[cur_st->curframe], *queued_fr; struct bpf_reg_state *cur_iter, *queued_iter; int iter_frameno = meta->iter.frameno; @@ -7800,6 +7898,19 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx, } if (cur_iter->iter.state == BPF_ITER_STATE_ACTIVE) { + /* Because iter_next() call is a checkpoint is_state_visitied() + * should guarantee parent state with same call sites and insn_idx. + */ + if (!cur_st->parent || cur_st->parent->insn_idx != insn_idx || + !same_callsites(cur_st->parent, cur_st)) { + verbose(env, "bug: bad parent state for iter next call"); + return -EFAULT; + } + /* Note cur_st->parent in the call below, it is necessary to skip + * checkpoint created for cur_st by is_state_visited() + * right at this instruction. 
+ */ + prev_st = find_prev_entry(env, cur_st->parent, insn_idx); /* branch out active iter state */ queued_st = push_stack(env, insn_idx + 1, insn_idx, false); if (!queued_st) @@ -7808,6 +7919,8 @@ static int process_iter_next_call(struct bpf_verifier_env *env, int insn_idx, queued_iter = &queued_st->frame[iter_frameno]->stack[iter_spi].spilled_ptr; queued_iter->iter.state = BPF_ITER_STATE_ACTIVE; queued_iter->iter.depth++; + if (prev_st) + widen_imprecise_scalars(env, prev_st, queued_st); queued_fr = queued_st->frame[queued_st->curframe]; mark_ptr_not_null_reg(&queued_fr->regs[BPF_REG_0]); @@ -15948,8 +16061,11 @@ static bool regs_exact(const struct bpf_reg_state *rold, /* Returns true if (rold safe implies rcur safe) */ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, - struct bpf_reg_state *rcur, struct bpf_idmap *idmap) + struct bpf_reg_state *rcur, struct bpf_idmap *idmap, bool exact) { + if (exact) + return regs_exact(rold, rcur, idmap); + if (!(rold->live & REG_LIVE_READ)) /* explored state didn't use this */ return true; @@ -16066,7 +16182,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, } static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, - struct bpf_func_state *cur, struct bpf_idmap *idmap) + struct bpf_func_state *cur, struct bpf_idmap *idmap, bool exact) { int i, spi; @@ -16079,7 +16195,12 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, spi = i / BPF_REG_SIZE; - if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ)) { + if (exact && + old->stack[spi].slot_type[i % BPF_REG_SIZE] != + cur->stack[spi].slot_type[i % BPF_REG_SIZE]) + return false; + + if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ) && !exact) { i += BPF_REG_SIZE - 1; /* explored state didn't use this */ continue; @@ -16129,7 +16250,7 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old, * return false to continue verification of this path */ if (!regsafe(env, &old->stack[spi].spilled_ptr, - &cur->stack[spi].spilled_ptr, idmap)) + &cur->stack[spi].spilled_ptr, idmap, exact)) return false; break; case STACK_DYNPTR: @@ -16211,16 +16332,16 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur, * the current state will reach 'bpf_exit' instruction safely */ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old, - struct bpf_func_state *cur) + struct bpf_func_state *cur, bool exact) { int i; for (i = 0; i < MAX_BPF_REG; i++) if (!regsafe(env, &old->regs[i], &cur->regs[i], - &env->idmap_scratch)) + &env->idmap_scratch, exact)) return false; - if (!stacksafe(env, old, cur, &env->idmap_scratch)) + if (!stacksafe(env, old, cur, &env->idmap_scratch, exact)) return false; if (!refsafe(old, cur, &env->idmap_scratch)) @@ -16229,17 +16350,23 @@ static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_stat return true; } +static void reset_idmap_scratch(struct bpf_verifier_env *env) +{ + env->idmap_scratch.tmp_id_gen = env->id_gen; + memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map)); +} + static bool states_equal(struct bpf_verifier_env *env, struct bpf_verifier_state *old, - struct bpf_verifier_state *cur) + struct bpf_verifier_state *cur, + bool exact) { int i; if (old->curframe != cur->curframe) return false; - env->idmap_scratch.tmp_id_gen = env->id_gen; - memset(&env->idmap_scratch.map, 0, sizeof(env->idmap_scratch.map)); + reset_idmap_scratch(env); /* Verification state 
from speculative execution simulation * must never prune a non-speculative execution one. @@ -16269,7 +16396,7 @@ static bool states_equal(struct bpf_verifier_env *env, for (i = 0; i <= old->curframe; i++) { if (old->frame[i]->callsite != cur->frame[i]->callsite) return false; - if (!func_states_equal(env, old->frame[i], cur->frame[i])) + if (!func_states_equal(env, old->frame[i], cur->frame[i], exact)) return false; } return true; @@ -16524,7 +16651,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl, **pprev; struct bpf_verifier_state *cur = env->cur_state, *new; - int i, j, err, states_cnt = 0; + int i, j, n, err, states_cnt = 0; bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx); bool add_new_state = force_new_state; @@ -16579,9 +16706,33 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) * It's safe to assume that iterator loop will finish, taking into * account iter_next() contract of eventually returning * sticky NULL result. + * + * Note, that states have to be compared exactly in this case because + * read and precision marks might not be finalized inside the loop. + * E.g. as in the program below: + * + * 1. r7 = -16 + * 2. r6 = bpf_get_prandom_u32() + * 3. while (bpf_iter_num_next(&fp[-8])) { + * 4. if (r6 != 42) { + * 5. r7 = -32 + * 6. r6 = bpf_get_prandom_u32() + * 7. continue + * 8. } + * 9. r0 = r10 + * 10. r0 += r7 + * 11. r8 = *(u64 *)(r0 + 0) + * 12. r6 = bpf_get_prandom_u32() + * 13. } + * + * Here verifier would first visit path 1-3, create a checkpoint at 3 + * with r7=-16, continue to 4-7,3. Existing checkpoint at 3 does + * not have read or precision mark for r7 yet, thus inexact states + * comparison would discard current state with r7=-32 + * => unsafe memory access at 11 would not be caught. */ if (is_iter_next_insn(env, insn_idx)) { - if (states_equal(env, &sl->state, cur)) { + if (states_equal(env, &sl->state, cur, true)) { struct bpf_func_state *cur_frame; struct bpf_reg_state *iter_state, *iter_reg; int spi; @@ -16604,7 +16755,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) } /* attempt to detect infinite loop to avoid unnecessary doomed work */ if (states_maybe_looping(&sl->state, cur) && - states_equal(env, &sl->state, cur) && + states_equal(env, &sl->state, cur, false) && !iter_active_depths_differ(&sl->state, cur)) { verbose_linfo(env, insn_idx, "; "); verbose(env, "infinite loop detected at insn %d\n", insn_idx); @@ -16629,7 +16780,7 @@ skip_inf_loop_check: add_new_state = false; goto miss; } - if (states_equal(env, &sl->state, cur)) { + if (states_equal(env, &sl->state, cur, false)) { hit: sl->hit_cnt++; /* reached equivalent register/stack state, @@ -16668,8 +16819,12 @@ miss: * to keep checking from state equivalence point of view. * Higher numbers increase max_states_per_insn and verification time, * but do not meaningfully decrease insn_processed. + * 'n' controls how many times state could miss before eviction. + * Use bigger 'n' for checkpoints because evicting checkpoint states + * too early would hinder iterator convergence. */ - if (sl->miss_cnt > sl->hit_cnt * 3 + 3) { + n = is_force_checkpoint(env, insn_idx) && sl->state.branches > 0 ? 64 : 3; + if (sl->miss_cnt > sl->hit_cnt * n + n) { /* the state is unlikely to be useful. 
Remove it to * speed up verification */ @@ -16743,6 +16898,7 @@ next: cur->parent = new; cur->first_insn_idx = insn_idx; + cur->dfs_depth = new->dfs_depth + 1; clear_jmp_history(cur); new_sl->next = *explored_state(env, insn_idx); *explored_state(env, insn_idx) = new_sl; diff --git a/tools/testing/selftests/bpf/progs/iters_task_vma.c b/tools/testing/selftests/bpf/progs/iters_task_vma.c index 44edecfdfaee..e085a51d153e 100644 --- a/tools/testing/selftests/bpf/progs/iters_task_vma.c +++ b/tools/testing/selftests/bpf/progs/iters_task_vma.c @@ -30,6 +30,7 @@ int iter_task_vma_for_each(const void *ctx) bpf_for_each(task_vma, vma, task, 0) { if (seen >= 1000) break; + barrier_var(seen); vm_ranges[seen].vm_start = vma->vm_start; vm_ranges[seen].vm_end = vma->vm_end; From 389ede06c2974b2f878a7ebff6b0f4f707f9db74 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 24 Oct 2023 03:09:14 +0300 Subject: [PATCH 31/51] selftests/bpf: tests with delayed read/precision marks in loop body These test cases try to hide read and precision marks from the loop convergence logic: marks would only be assigned on subsequent loop iterations or after exploring states pushed to the env->head stack first. Without the verifier fix to use exact states comparison logic for iterator convergence, these tests (except 'triple_continue') would be erroneously marked as safe. Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231024000917.12153-5-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/iters.c | 518 ++++++++++++++++++++++ 1 file changed, 518 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index 6b9b3c56f009..764a68420c3e 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -14,6 +14,13 @@ int my_pid; int arr[256]; int small_arr[16] SEC(".data.small_arr"); +struct { + __uint(type, BPF_MAP_TYPE_HASH); + __uint(max_entries, 10); + __type(key, int); + __type(value, int); +} amap SEC(".maps"); + #ifdef REAL_TEST #define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0 #else @@ -716,4 +723,515 @@ int iter_pass_iter_ptr_to_subprog(const void *ctx) return 0; } +SEC("?raw_tp") +__failure +__msg("R1 type=scalar expected=fp") +__naked int delayed_read_mark(void) +{ + /* This is equivalent to C program below. + * The call to bpf_iter_num_next() is reachable with r7 values &fp[-16] and 0xdead. + * State with r7=&fp[-16] is visited first and follows r6 != 42 ... continue branch. + * At this point the iterator next() call is reached with r7 that has no read mark. + * The loop body with r7=0xdead would only be visited if the verifier decided to continue + * with a second loop iteration. Absence of a read mark on r7 might affect the state + * equivalence logic used for iterator convergence tracking.
+ * + * r7 = &fp[-16] + * fp[-16] = 0 + * r6 = bpf_get_prandom_u32() + * bpf_iter_num_new(&fp[-8], 0, 10) + * while (bpf_iter_num_next(&fp[-8])) { + * r6++ + * if (r6 != 42) { + * r7 = 0xdead + * continue; + * } + * bpf_probe_read_user(r7, 8, 0xdeadbeef); // this is not safe + * } + * bpf_iter_num_destroy(&fp[-8]) + * return 0 + */ + asm volatile ( + "r7 = r10;" + "r7 += -16;" + "r0 = 0;" + "*(u64 *)(r7 + 0) = r0;" + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "1:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto 2f;" + "r6 += 1;" + "if r6 != 42 goto 3f;" + "r7 = 0xdead;" + "goto 1b;" + "3:" + "r1 = r7;" + "r2 = 8;" + "r3 = 0xdeadbeef;" + "call %[bpf_probe_read_user];" + "goto 1b;" + "2:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy), + __imm(bpf_probe_read_user) + : __clobber_all + ); +} + +SEC("?raw_tp") +__failure +__msg("math between fp pointer and register with unbounded") +__naked int delayed_precision_mark(void) +{ + /* This is equivalent to C program below. + * The test is similar to delayed_iter_mark but verifies that incomplete + * precision don't fool verifier. + * The call to bpf_iter_num_next() is reachable with r7 values -16 and -32. + * State with r7=-16 is visited first and follows r6 != 42 ... continue branch. + * At this point iterator next() call is reached with r7 that has no read + * and precision marks. + * Loop body with r7=-32 would only be visited if verifier would decide to continue + * with second loop iteration. Absence of precision mark on r7 might affect state + * equivalent logic used for iterator convergence tracking. + * + * r8 = 0 + * fp[-16] = 0 + * r7 = -16 + * r6 = bpf_get_prandom_u32() + * bpf_iter_num_new(&fp[-8], 0, 10) + * while (bpf_iter_num_next(&fp[-8])) { + * if (r6 != 42) { + * r7 = -32 + * r6 = bpf_get_prandom_u32() + * continue; + * } + * r0 = r10 + * r0 += r7 + * r8 = *(u64 *)(r0 + 0) // this is not safe + * r6 = bpf_get_prandom_u32() + * } + * bpf_iter_num_destroy(&fp[-8]) + * return r8 + */ + asm volatile ( + "r8 = 0;" + "*(u64 *)(r10 - 16) = r8;" + "r7 = -16;" + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "1:" + "r1 = r10;" + "r1 += -8;\n" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto 2f;" + "if r6 != 42 goto 3f;" + "r7 = -32;" + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "goto 1b;\n" + "3:" + "r0 = r10;" + "r0 += r7;" + "r8 = *(u64 *)(r0 + 0);" + "call %[bpf_get_prandom_u32];" + "r6 = r0;" + "goto 1b;\n" + "2:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = r8;" + "exit;" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy), + __imm(bpf_probe_read_user) + : __clobber_all + ); +} + +SEC("?raw_tp") +__failure +__msg("math between fp pointer and register with unbounded") +__flag(BPF_F_TEST_STATE_FREQ) +__naked int loop_state_deps1(void) +{ + /* This is equivalent to C program below. + * + * The case turns out to be tricky in a sense that: + * - states with c=-25 are explored only on a second iteration + * of the outer loop; + * - states with read+precise mark on c are explored only on + * second iteration of the inner loop and in a state which + * is pushed to states stack first. 
+ * + * Depending on the details of iterator convergence logic + * verifier might stop states traversal too early and miss + * unsafe c=-25 memory access. + * + * j = iter_new(); // fp[-16] + * a = 0; // r6 + * b = 0; // r7 + * c = -24; // r8 + * while (iter_next(j)) { + * i = iter_new(); // fp[-8] + * a = 0; // r6 + * b = 0; // r7 + * while (iter_next(i)) { + * if (a == 1) { + * a = 0; + * b = 1; + * } else if (a == 0) { + * a = 1; + * if (random() == 42) + * continue; + * if (b == 1) { + * *(r10 + c) = 7; // this is not safe + * iter_destroy(i); + * iter_destroy(j); + * return; + * } + * } + * } + * iter_destroy(i); + * a = 0; + * b = 0; + * c = -25; + * } + * iter_destroy(j); + * return; + */ + asm volatile ( + "r1 = r10;" + "r1 += -16;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "r6 = 0;" + "r7 = 0;" + "r8 = -24;" + "j_loop_%=:" + "r1 = r10;" + "r1 += -16;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto j_loop_end_%=;" + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "r6 = 0;" + "r7 = 0;" + "i_loop_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto i_loop_end_%=;" + "check_one_r6_%=:" + "if r6 != 1 goto check_zero_r6_%=;" + "r6 = 0;" + "r7 = 1;" + "goto i_loop_%=;" + "check_zero_r6_%=:" + "if r6 != 0 goto i_loop_%=;" + "r6 = 1;" + "call %[bpf_get_prandom_u32];" + "if r0 != 42 goto check_one_r7_%=;" + "goto i_loop_%=;" + "check_one_r7_%=:" + "if r7 != 1 goto i_loop_%=;" + "r0 = r10;" + "r0 += r8;" + "r1 = 7;" + "*(u64 *)(r0 + 0) = r1;" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r1 = r10;" + "r1 += -16;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + "i_loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r6 = 0;" + "r7 = 0;" + "r8 = -25;" + "goto j_loop_%=;" + "j_loop_end_%=:" + "r1 = r10;" + "r1 += -16;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy) + : __clobber_all + ); +} + +SEC("?raw_tp") +__success +__naked int triple_continue(void) +{ + /* This is equivalent to C program below. + * High branching factor of the loop body turned out to be + * problematic for one of the iterator convergence tracking + * algorithms explored. + * + * r6 = bpf_get_prandom_u32() + * bpf_iter_num_new(&fp[-8], 0, 10) + * while (bpf_iter_num_next(&fp[-8])) { + * if (bpf_get_prandom_u32() != 42) + * continue; + * if (bpf_get_prandom_u32() != 42) + * continue; + * if (bpf_get_prandom_u32() != 42) + * continue; + * r0 += 0; + * } + * bpf_iter_num_destroy(&fp[-8]) + * return 0 + */ + asm volatile ( + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "loop_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto loop_end_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 != 42 goto loop_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 != 42 goto loop_%=;" + "call %[bpf_get_prandom_u32];" + "if r0 != 42 goto loop_%=;" + "r0 += 0;" + "goto loop_%=;" + "loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy) + : __clobber_all + ); +} + +SEC("?raw_tp") +__success +__naked int widen_spill(void) +{ + /* This is equivalent to C program below. 
+ * The counter is stored in fp[-16]; if this counter is not widened, + * verifier states representing loop iterations would never converge. + * + * fp[-16] = 0 + * bpf_iter_num_new(&fp[-8], 0, 10) + * while (bpf_iter_num_next(&fp[-8])) { + * r0 = fp[-16]; + * r0 += 1; + * fp[-16] = r0; + * } + * bpf_iter_num_destroy(&fp[-8]) + * return 0 + */ + asm volatile ( + "r0 = 0;" + "*(u64 *)(r10 - 16) = r0;" + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "loop_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto loop_end_%=;" + "r0 = *(u64 *)(r10 - 16);" + "r0 += 1;" + "*(u64 *)(r10 - 16) = r0;" + "goto loop_%=;" + "loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + : + : __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy) + : __clobber_all + ); +} + +SEC("raw_tp") +__success +__naked int checkpoint_states_deletion(void) +{ + /* This is equivalent to C program below. + * + * int *a, *b, *c, *d, *e, *f; + * int i, sum = 0; + * bpf_for(i, 0, 10) { + * a = bpf_map_lookup_elem(&amap, &i); + * b = bpf_map_lookup_elem(&amap, &i); + * c = bpf_map_lookup_elem(&amap, &i); + * d = bpf_map_lookup_elem(&amap, &i); + * e = bpf_map_lookup_elem(&amap, &i); + * f = bpf_map_lookup_elem(&amap, &i); + * if (a) sum += 1; + * if (b) sum += 1; + * if (c) sum += 1; + * if (d) sum += 1; + * if (e) sum += 1; + * if (f) sum += 1; + * } + * return 0; + * + * The body of the loop spawns multiple simulation paths + * with different combinations of NULL/non-NULL information for a/b/c/d/e/f. + * Each combination is unique from states_equal() point of view. + * An explored states checkpoint is created after each iterator next call. + * Iterator convergence logic expects that eventually the current state + * will become equal to one of the explored states and thus loop + * exploration will be finished (at least for a specific path). + * Verifier evicts explored states with high miss to hit ratio + * to avoid comparing current state with too many explored + * states per instruction. + * This test is designed to "stress test" the eviction policy defined by the formula: + * + * sl->miss_cnt > sl->hit_cnt * N + N // if true sl->state is evicted + * + * Currently N is set to 64, which allows for 6 variables in this test.
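+ * (With 6 such pointer variables the loop body can produce up to
+ * 2^6 = 64 distinct NULL/non-NULL combinations per checkpoint,
+ * which is what ties the number of variables to the N = 64 threshold.)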
+ */ + asm volatile ( + "r6 = 0;" /* a */ + "r7 = 0;" /* b */ + "r8 = 0;" /* c */ + "*(u64 *)(r10 - 24) = r6;" /* d */ + "*(u64 *)(r10 - 32) = r6;" /* e */ + "*(u64 *)(r10 - 40) = r6;" /* f */ + "r9 = 0;" /* sum */ + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "loop_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto loop_end_%=;" + + "*(u64 *)(r10 - 16) = r0;" + + "r1 = %[amap] ll;" + "r2 = r10;" + "r2 += -16;" + "call %[bpf_map_lookup_elem];" + "r6 = r0;" + + "r1 = %[amap] ll;" + "r2 = r10;" + "r2 += -16;" + "call %[bpf_map_lookup_elem];" + "r7 = r0;" + + "r1 = %[amap] ll;" + "r2 = r10;" + "r2 += -16;" + "call %[bpf_map_lookup_elem];" + "r8 = r0;" + + "r1 = %[amap] ll;" + "r2 = r10;" + "r2 += -16;" + "call %[bpf_map_lookup_elem];" + "*(u64 *)(r10 - 24) = r0;" + + "r1 = %[amap] ll;" + "r2 = r10;" + "r2 += -16;" + "call %[bpf_map_lookup_elem];" + "*(u64 *)(r10 - 32) = r0;" + + "r1 = %[amap] ll;" + "r2 = r10;" + "r2 += -16;" + "call %[bpf_map_lookup_elem];" + "*(u64 *)(r10 - 40) = r0;" + + "if r6 == 0 goto +1;" + "r9 += 1;" + "if r7 == 0 goto +1;" + "r9 += 1;" + "if r8 == 0 goto +1;" + "r9 += 1;" + "r0 = *(u64 *)(r10 - 24);" + "if r0 == 0 goto +1;" + "r9 += 1;" + "r0 = *(u64 *)(r10 - 32);" + "if r0 == 0 goto +1;" + "r9 += 1;" + "r0 = *(u64 *)(r10 - 40);" + "if r0 == 0 goto +1;" + "r9 += 1;" + + "goto loop_%=;" + "loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + : + : __imm(bpf_map_lookup_elem), + __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy), + __imm_addr(amap) + : __clobber_all + ); +} + char _license[] SEC("license") = "GPL"; From 2a0992829ea3864939d917a5c7b48be6629c6217 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 24 Oct 2023 03:09:15 +0300 Subject: [PATCH 32/51] bpf: correct loop detection for iterators convergence It turns out that .branches > 0 in is_state_visited() is not a sufficient condition to identify if two verifier states form a loop when iterators convergence is computed. This commit adds logic to distinguish situations like below: (I) initial (II) initial | | V V .---------> hdr .. | | | | V V | .------... .------.. | | | | | | V V V V | ... ... .-> hdr .. | | | | | | | V V | V V | succ <- cur | succ <- cur | | | | | V | V | ... | ... | | | | '----' '----' For both (I) and (II) successor 'succ' of the current state 'cur' was previously explored and has branches count at 0. However, loop entry 'hdr' corresponding to 'succ' might be a part of current DFS path. If that is the case 'succ' and 'cur' are members of the same loop and have to be compared exactly. Co-developed-by: Andrii Nakryiko Co-developed-by: Alexei Starovoitov Reviewed-by: Andrii Nakryiko Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231024000917.12153-6-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- include/linux/bpf_verifier.h | 15 +++ kernel/bpf/verifier.c | 207 ++++++++++++++++++++++++++++++++++- 2 files changed, 218 insertions(+), 4 deletions(-) diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 38b788228594..24213a99cc79 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -373,10 +373,25 @@ struct bpf_verifier_state { struct bpf_active_lock active_lock; bool speculative; bool active_rcu_lock; + /* If this state was ever pointed-to by other state's loop_entry field + * this flag would be set to true. 
Used to avoid freeing such states + * while they are still in use. + */ + bool used_as_loop_entry; /* first and last insn idx of this verifier state */ u32 first_insn_idx; u32 last_insn_idx; + /* If this state is a part of states loop this field points to some + * parent of this state such that: + * - it is also a member of the same states loop; + * - DFS states traversal starting from initial state visits loop_entry + * state before this state. + * Used to compute topmost loop entry for state loops. + * State loops might appear because of open coded iterators logic. + * See get_loop_entry() for more information. + */ + struct bpf_verifier_state *loop_entry; /* jmp history recorded from first to last. * backtracking is using it to go from last to first. * For most states jmp_history_cnt is [0-3]. diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 6573e8c77329..f23fbfe82c59 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -1803,6 +1803,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state, dst_state->first_insn_idx = src->first_insn_idx; dst_state->last_insn_idx = src->last_insn_idx; dst_state->dfs_depth = src->dfs_depth; + dst_state->used_as_loop_entry = src->used_as_loop_entry; for (i = 0; i <= src->curframe; i++) { dst = dst_state->frame[i]; if (!dst) { @@ -1845,11 +1846,176 @@ static bool same_callsites(struct bpf_verifier_state *a, struct bpf_verifier_sta return true; } +/* Open coded iterators allow back-edges in the state graph in order to + * check unbounded loops that iterators. + * + * In is_state_visited() it is necessary to know if explored states are + * part of some loops in order to decide whether non-exact states + * comparison could be used: + * - non-exact states comparison establishes sub-state relation and uses + * read and precision marks to do so, these marks are propagated from + * children states and thus are not guaranteed to be final in a loop; + * - exact states comparison just checks if current and explored states + * are identical (and thus form a back-edge). + * + * Paper "A New Algorithm for Identifying Loops in Decompilation" + * by Tao Wei, Jian Mao, Wei Zou and Yu Chen [1] presents a convenient + * algorithm for loop structure detection and gives an overview of + * relevant terminology. It also has helpful illustrations. + * + * [1] https://api.semanticscholar.org/CorpusID:15784067 + * + * We use a similar algorithm but because loop nested structure is + * irrelevant for verifier ours is significantly simpler and resembles + * strongly connected components algorithm from Sedgewick's textbook. + * + * Define topmost loop entry as a first node of the loop traversed in a + * depth first search starting from initial state. The goal of the loop + * tracking algorithm is to associate topmost loop entries with states + * derived from these entries. + * + * For each step in the DFS states traversal algorithm needs to identify + * the following situations: + * + * initial initial initial + * | | | + * V V V + * ... ... .---------> hdr + * | | | | + * V V | V + * cur .-> succ | .------... + * | | | | | | + * V | V | V V + * succ '-- cur | ... ... + * | | | + * | V V + * | succ <- cur + * | | + * | V + * | ... + * | | + * '----' + * + * (A) successor state of cur (B) successor state of cur or it's entry + * not yet traversed are in current DFS path, thus cur and succ + * are members of the same outermost loop + * + * initial initial + * | | + * V V + * ... ... + * | | + * V V + * .------... .------... 
+ * | | | | + * V V V V + * .-> hdr ... ... ... + * | | | | | + * | V V V V + * | succ <- cur succ <- cur + * | | | + * | V V + * | ... ... + * | | | + * '----' exit + * + * (C) successor state of cur is a part of some loop but this loop + * does not include cur or successor state is not in a loop at all. + * + * Algorithm could be described as the following python code: + * + * traversed = set() # Set of traversed nodes + * entries = {} # Mapping from node to loop entry + * depths = {} # Depth level assigned to graph node + * path = set() # Current DFS path + * + * # Find outermost loop entry known for n + * def get_loop_entry(n): + * h = entries.get(n, None) + * while h in entries and entries[h] != h: + * h = entries[h] + * return h + * + * # Update n's loop entry if h's outermost entry comes + * # before n's outermost entry in current DFS path. + * def update_loop_entry(n, h): + * n1 = get_loop_entry(n) or n + * h1 = get_loop_entry(h) or h + * if h1 in path and depths[h1] <= depths[n1]: + * entries[n] = h1 + * + * def dfs(n, depth): + * traversed.add(n) + * path.add(n) + * depths[n] = depth + * for succ in G.successors(n): + * if succ not in traversed: + * # Case A: explore succ and update cur's loop entry + * # only if succ's entry is in current DFS path. + * dfs(succ, depth + 1) + * h = get_loop_entry(succ) + * update_loop_entry(n, h) + * else: + * # Case B or C depending on `h1 in path` check in update_loop_entry(). + * update_loop_entry(n, succ) + * path.remove(n) + * + * To adapt this algorithm for use with verifier: + * - use st->branch == 0 as a signal that DFS of succ had been finished + * and cur's loop entry has to be updated (case A), handle this in + * update_branch_counts(); + * - use st->branch > 0 as a signal that st is in the current DFS path; + * - handle cases B and C in is_state_visited(); + * - update topmost loop entry for intermediate states in get_loop_entry(). + */ +static struct bpf_verifier_state *get_loop_entry(struct bpf_verifier_state *st) +{ + struct bpf_verifier_state *topmost = st->loop_entry, *old; + + while (topmost && topmost->loop_entry && topmost != topmost->loop_entry) + topmost = topmost->loop_entry; + /* Update loop entries for intermediate states to avoid this + * traversal in future get_loop_entry() calls. + */ + while (st && st->loop_entry != topmost) { + old = st->loop_entry; + st->loop_entry = topmost; + st = old; + } + return topmost; +} + +static void update_loop_entry(struct bpf_verifier_state *cur, struct bpf_verifier_state *hdr) +{ + struct bpf_verifier_state *cur1, *hdr1; + + cur1 = get_loop_entry(cur) ?: cur; + hdr1 = get_loop_entry(hdr) ?: hdr; + /* The head1->branches check decides between cases B and C in + * comment for get_loop_entry(). If hdr1->branches == 0 then + * head's topmost loop entry is not in current DFS path, + * hence 'cur' and 'hdr' are not in the same loop and there is + * no need to update cur->loop_entry. + */ + if (hdr1->branches && hdr1->dfs_depth <= cur1->dfs_depth) { + cur->loop_entry = hdr; + hdr->used_as_loop_entry = true; + } +} + static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifier_state *st) { while (st) { u32 br = --st->branches; + /* br == 0 signals that DFS exploration for 'st' is finished, + * thus it is necessary to update parent's loop entry if it + * turned out that st is a part of some loop. + * This is a part of 'case A' in get_loop_entry() comment. 
+ */ + if (br == 0 && st->parent && st->loop_entry) + update_loop_entry(st->parent, st->loop_entry); + /* WARN_ON(br > 1) technically makes sense here, * but see comment in push_stack(), hence: */ @@ -16650,10 +16816,11 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) { struct bpf_verifier_state_list *new_sl; struct bpf_verifier_state_list *sl, **pprev; - struct bpf_verifier_state *cur = env->cur_state, *new; + struct bpf_verifier_state *cur = env->cur_state, *new, *loop_entry; int i, j, n, err, states_cnt = 0; bool force_new_state = env->test_state_freq || is_force_checkpoint(env, insn_idx); bool add_new_state = force_new_state; + bool force_exact; /* bpf progs typically have pruning point every 4 instructions * http://vger.kernel.org/bpfconf2019.html#session-1 @@ -16748,8 +16915,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) */ spi = __get_spi(iter_reg->off + iter_reg->var_off.value); iter_state = &func(env, iter_reg)->stack[spi].spilled_ptr; - if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) + if (iter_state->iter.state == BPF_ITER_STATE_ACTIVE) { + update_loop_entry(cur, &sl->state); goto hit; + } } goto skip_inf_loop_check; } @@ -16780,7 +16949,36 @@ skip_inf_loop_check: add_new_state = false; goto miss; } - if (states_equal(env, &sl->state, cur, false)) { + /* If sl->state is a part of a loop and this loop's entry is a part of + * current verification path then states have to be compared exactly. + * 'force_exact' is needed to catch the following case: + * + * initial Here state 'succ' was processed first, + * | it was eventually tracked to produce a + * V state identical to 'hdr'. + * .---------> hdr All branches from 'succ' had been explored + * | | and thus 'succ' has its .branches == 0. + * | V + * | .------... Suppose states 'cur' and 'succ' correspond + * | | | to the same instruction + callsites. + * | V V In such case it is necessary to check + * | ... ... if 'succ' and 'cur' are states_equal(). + * | | | If 'succ' and 'cur' are a part of the + * | V V same loop exact flag has to be set. + * | succ <- cur To check if that is the case, verify + * | | if loop entry of 'succ' is in current + * | V DFS path. + * | ... + * | | + * '----' + * + * Additional details are in the comment before get_loop_entry(). + */ + loop_entry = get_loop_entry(&sl->state); + force_exact = loop_entry && loop_entry->branches > 0; + if (states_equal(env, &sl->state, cur, force_exact)) { + if (force_exact) + update_loop_entry(cur, loop_entry); hit: sl->hit_cnt++; /* reached equivalent register/stack state, @@ -16829,7 +17027,8 @@ miss: * speed up verification */ *pprev = sl->next; - if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE) { + if (sl->state.frame[0]->regs[0].live & REG_LIVE_DONE && + !sl->state.used_as_loop_entry) { u32 br = sl->state.branches; WARN_ONCE(br, From 64870feebecb7130291a55caf0ce839a87405a70 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 24 Oct 2023 03:09:16 +0300 Subject: [PATCH 33/51] selftests/bpf: test if state loops are detected in a tricky case A convoluted test case for iterators convergence logic that demonstrates that states with branch count equal to 0 might still be a part of not completely explored loop. E.g. consider the following state diagram: initial Here state 'succ' was processed first, | it was eventually tracked to produce a V state identical to 'hdr'. .---------> hdr All branches from 'succ' had been explored | | and thus 'succ' has its .branches == 0. | V | .------... 
Suppose states 'cur' and 'succ' correspond | | | to the same instruction + callsites. | V V In such case it is necessary to check | ... ... whether 'succ' and 'cur' are identical. | | | If 'succ' and 'cur' are a part of the same loop | V V they have to be compared exactly. | succ <- cur | | | V | ... | | '----' Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231024000917.12153-7-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- tools/testing/selftests/bpf/progs/iters.c | 177 ++++++++++++++++++++++ 1 file changed, 177 insertions(+) diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index 764a68420c3e..c20c4e38b71c 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -998,6 +998,183 @@ __naked int loop_state_deps1(void) ); } +SEC("?raw_tp") +__failure +__msg("math between fp pointer and register with unbounded") +__flag(BPF_F_TEST_STATE_FREQ) +__naked int loop_state_deps2(void) +{ + /* This is equivalent to C program below. + * + * The case turns out to be tricky in a sense that: + * - states with read+precise mark on c are explored only on a second + * iteration of the first inner loop and in a state which is pushed to + * states stack first. + * - states with c=-25 are explored only on a second iteration of the + * second inner loop and in a state which is pushed to states stack + * first. + * + * Depending on the details of iterator convergence logic + * verifier might stop states traversal too early and miss + * unsafe c=-25 memory access. + * + * j = iter_new(); // fp[-16] + * a = 0; // r6 + * b = 0; // r7 + * c = -24; // r8 + * while (iter_next(j)) { + * i = iter_new(); // fp[-8] + * a = 0; // r6 + * b = 0; // r7 + * while (iter_next(i)) { + * if (a == 1) { + * a = 0; + * b = 1; + * } else if (a == 0) { + * a = 1; + * if (random() == 42) + * continue; + * if (b == 1) { + * *(r10 + c) = 7; // this is not safe + * iter_destroy(i); + * iter_destroy(j); + * return; + * } + * } + * } + * iter_destroy(i); + * i = iter_new(); // fp[-8] + * a = 0; // r6 + * b = 0; // r7 + * while (iter_next(i)) { + * if (a == 1) { + * a = 0; + * b = 1; + * } else if (a == 0) { + * a = 1; + * if (random() == 42) + * continue; + * if (b == 1) { + * a = 0; + * c = -25; + * } + * } + * } + * iter_destroy(i); + * } + * iter_destroy(j); + * return; + */ + asm volatile ( + "r1 = r10;" + "r1 += -16;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "r6 = 0;" + "r7 = 0;" + "r8 = -24;" + "j_loop_%=:" + "r1 = r10;" + "r1 += -16;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto j_loop_end_%=;" + + /* first inner loop */ + "r1 = r10;" + "r1 += -8;" + "r2 = 0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "r6 = 0;" + "r7 = 0;" + "i_loop_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto i_loop_end_%=;" + "check_one_r6_%=:" + "if r6 != 1 goto check_zero_r6_%=;" + "r6 = 0;" + "r7 = 1;" + "goto i_loop_%=;" + "check_zero_r6_%=:" + "if r6 != 0 goto i_loop_%=;" + "r6 = 1;" + "call %[bpf_get_prandom_u32];" + "if r0 != 42 goto check_one_r7_%=;" + "goto i_loop_%=;" + "check_one_r7_%=:" + "if r7 != 1 goto i_loop_%=;" + "r0 = r10;" + "r0 += r8;" + "r1 = 7;" + "*(u64 *)(r0 + 0) = r1;" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + "r1 = r10;" + "r1 += -16;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + "i_loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + + /* second inner loop */ + "r1 = r10;" + "r1 += -8;" + "r2 = 
0;" + "r3 = 10;" + "call %[bpf_iter_num_new];" + "r6 = 0;" + "r7 = 0;" + "i2_loop_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_next];" + "if r0 == 0 goto i2_loop_end_%=;" + "check2_one_r6_%=:" + "if r6 != 1 goto check2_zero_r6_%=;" + "r6 = 0;" + "r7 = 1;" + "goto i2_loop_%=;" + "check2_zero_r6_%=:" + "if r6 != 0 goto i2_loop_%=;" + "r6 = 1;" + "call %[bpf_get_prandom_u32];" + "if r0 != 42 goto check2_one_r7_%=;" + "goto i2_loop_%=;" + "check2_one_r7_%=:" + "if r7 != 1 goto i2_loop_%=;" + "r6 = 0;" + "r8 = -25;" + "goto i2_loop_%=;" + "i2_loop_end_%=:" + "r1 = r10;" + "r1 += -8;" + "call %[bpf_iter_num_destroy];" + + "r6 = 0;" + "r7 = 0;" + "goto j_loop_%=;" + "j_loop_end_%=:" + "r1 = r10;" + "r1 += -16;" + "call %[bpf_iter_num_destroy];" + "r0 = 0;" + "exit;" + : + : __imm(bpf_get_prandom_u32), + __imm(bpf_iter_num_new), + __imm(bpf_iter_num_next), + __imm(bpf_iter_num_destroy) + : __clobber_all + ); +} + SEC("?raw_tp") __success __naked int triple_continue(void) From b4d8239534fddc036abe4a0fdbf474d9894d4641 Mon Sep 17 00:00:00 2001 From: Eduard Zingerman Date: Tue, 24 Oct 2023 03:09:17 +0300 Subject: [PATCH 34/51] bpf: print full verifier states on infinite loop detection Additional logging in is_state_visited(): if infinite loop is detected print full verifier state for both current and equivalent states. Acked-by: Andrii Nakryiko Signed-off-by: Eduard Zingerman Link: https://lore.kernel.org/r/20231024000917.12153-8-eddyz87@gmail.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/verifier.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index f23fbfe82c59..98f9d0f35931 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -16928,6 +16928,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) !iter_active_depths_differ(&sl->state, cur)) { verbose_linfo(env, insn_idx, "; "); verbose(env, "infinite loop detected at insn %d\n", insn_idx); + verbose(env, "cur state:"); + print_verifier_state(env, cur->frame[cur->curframe], true); + verbose(env, "old state:"); + print_verifier_state(env, sl->state.frame[cur->curframe], true); return -EINVAL; } /* if the verifier is processing a loop, avoid adding new state From 99b29a499b5fdfb7ab274835b8e4d4c11df2f6d7 Mon Sep 17 00:00:00 2001 From: Albert Huang Date: Mon, 23 Oct 2023 20:57:31 +0800 Subject: [PATCH 35/51] xsk: Avoid starving the xsk further down the list In the previous implementation, when multiple xsk sockets were associated with a single xsk_buff_pool, a situation could arise where the xsk_tx_list maintained data at the front for one xsk socket while starving the xsk sockets at the back of the list. This could result in issues such as the inability to transmit packets, increased latency, and jitter. To address this problem, we introduce a new variable called tx_budget_spent, which limits each xsk to transmit a maximum of MAX_PER_SOCKET_BUDGET tx descriptors. This allocation ensures equitable opportunities for subsequent xsk sockets to send tx descriptors. The value of MAX_PER_SOCKET_BUDGET is set to 32. 
Signed-off-by: Albert Huang Signed-off-by: Daniel Borkmann Acked-by: Magnus Karlsson Link: https://lore.kernel.org/bpf/20231023125732.82261-1-huangjie.albert@bytedance.com --- include/net/xdp_sock.h | 7 +++++++ net/xdp/xsk.c | 18 ++++++++++++++++++ 2 files changed, 25 insertions(+) diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 7dd0df2f6f8e..f83128007fb0 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -63,6 +63,13 @@ struct xdp_sock { struct xsk_queue *tx ____cacheline_aligned_in_smp; struct list_head tx_list; + /* Record the number of tx descriptors sent by this xsk; when it + * exceeds MAX_PER_SOCKET_BUDGET, an opportunity needs to be given + * to other xsks to send tx descriptors, thereby preventing them + * from being starved. + */ + u32 tx_budget_spent; + /* Protects generic receive. */ spinlock_t rx_lock; diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index ba070fd37d24..ae9f8cb611f6 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -33,6 +33,7 @@ #include "xsk.h" #define TX_BATCH_SIZE 32 +#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE) static DEFINE_PER_CPU(struct list_head, xskmap_flush_list); @@ -423,16 +424,25 @@ EXPORT_SYMBOL(xsk_tx_release); bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc) { + bool budget_exhausted = false; struct xdp_sock *xs; rcu_read_lock(); +again: list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) { + if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) { + budget_exhausted = true; + continue; + } + if (!xskq_cons_peek_desc(xs->tx, desc, pool)) { if (xskq_has_descs(xs->tx)) xskq_cons_release(xs->tx); continue; } + xs->tx_budget_spent++; + /* This is the backpressure mechanism for the Tx path. * Reserve space in the completion queue and only proceed * if there is space in it. This avoids having to implement @@ -446,6 +456,14 @@ bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc) return true; } + if (budget_exhausted) { + list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) + xs->tx_budget_spent = 0; + + budget_exhausted = false; + goto again; + } + out: rcu_read_unlock(); return false; From d35381aa73f7e1e8b25f3ed5283287a64d9ddff5 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Wed, 11 Oct 2023 22:57:41 -0700 Subject: [PATCH 36/51] bpf: Fix unnecessary -EBUSY from htab_lock_bucket htab_lock_bucket uses the following logic to avoid recursion: 1. preempt_disable(); 2. check percpu counter htab->map_locked[hash] for recursion; 2.1. if map_locked[hash] is already taken, return -EBUSY; 3. raw_spin_lock_irqsave(); However, if an IRQ hits between 2 and 3, BPF programs attached to the IRQ logic will not be able to access the same hash of the hashtab and will get -EBUSY. This -EBUSY is not really necessary. Fix it by disabling IRQs before checking map_locked: 1. preempt_disable(); 2. local_irq_save(); 3. check percpu counter htab->map_locked[hash] for recursion; 3.1. if map_locked[hash] is already taken, return -EBUSY; 4. raw_spin_lock(). Similarly, use raw_spin_unlock() and local_irq_restore() in htab_unlock_bucket().
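In code form, the resulting lock-side pairing is (condensed from the diff below):

    preempt_disable();
    local_irq_save(flags);                  /* block IRQs before the recursion check */
    if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
            __this_cpu_dec(*(htab->map_locked[hash]));
            local_irq_restore(flags);
            preempt_enable();
            return -EBUSY;                  /* genuine same-CPU recursion */
    }
    raw_spin_lock(&b->raw_lock);            /* IRQs are already disabled */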
Fixes: 20b6cc34ea74 ("bpf: Avoid hashtab deadlock with map_locked")
Suggested-by: Tejun Heo
Signed-off-by: Song Liu
Signed-off-by: Andrii Nakryiko
Signed-off-by: Daniel Borkmann
Link: https://lore.kernel.org/bpf/7a9576222aa40b1c84ad3a9ba3e64011d1a04d41.camel@linux.ibm.com
Link: https://lore.kernel.org/bpf/20231012055741.3375999-1-song@kernel.org
---
 kernel/bpf/hashtab.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index a8c7e1c5abfa..fd8d4b0addfc 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -155,13 +155,15 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
 preempt_disable();
+ local_irq_save(flags);
 if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
 __this_cpu_dec(*(htab->map_locked[hash]));
+ local_irq_restore(flags);
 preempt_enable();
 return -EBUSY;
 }
- raw_spin_lock_irqsave(&b->raw_lock, flags);
+ raw_spin_lock(&b->raw_lock);
 *pflags = flags;
 return 0;
@@ -172,8 +174,9 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
 unsigned long flags)
 {
 hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
- raw_spin_unlock_irqrestore(&b->raw_lock, flags);
+ raw_spin_unlock(&b->raw_lock);
 __this_cpu_dec(*(htab->map_locked[hash]));
+ local_irq_restore(flags);
 preempt_enable();
 }

From 06646da01458682023321bdc7553b8140e95d077 Mon Sep 17 00:00:00 2001
From: "Paul E. McKenney"
Date: Wed, 18 Oct 2023 15:28:32 -0700
Subject: [PATCH 37/51] bpf: Fold smp_mb__before_atomic() into atomic_set_release()

The bpf_user_ringbuf_drain() BPF_CALL function uses an atomic_set()
immediately preceded by smp_mb__before_atomic() so as to order storing
of ring-buffer consumer and producer positions prior to the atomic_set()
call's clearing of the ->busy flag, as follows:

 smp_mb__before_atomic();
 atomic_set(&rb->busy, 0);

Although this works given current architectures and implementations, and
given that this only needs to order prior writes against a later write,
it does so only by accident: the smp_mb__before_atomic() is only
guaranteed to work with read-modify-write atomic operations, and not at
all with things like atomic_set() and atomic_read().

Note especially that smp_mb__before_atomic() will not, repeat *not*,
order the prior write to "a" before the subsequent
non-read-modify-write atomic read from "b", even on strongly ordered
systems such as x86:

 WRITE_ONCE(a, 1);
 smp_mb__before_atomic();
 r1 = atomic_read(&b);

Therefore, replace the smp_mb__before_atomic() and atomic_set() with
atomic_set_release() as follows:

 atomic_set_release(&rb->busy, 0);

This is no slower (and sometimes is faster) than the original, and also
provides a formal guarantee of ordering that the original lacks.

Signed-off-by: Paul E. McKenney
Signed-off-by: Daniel Borkmann
Acked-by: David Vernet
Link: https://lore.kernel.org/bpf/ec86d38e-cfb4-44aa-8fdb-6c925922d93c@paulmck-laptop
---
 kernel/bpf/ringbuf.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/kernel/bpf/ringbuf.c b/kernel/bpf/ringbuf.c
index f045fde632e5..0ee653a936ea 100644
--- a/kernel/bpf/ringbuf.c
+++ b/kernel/bpf/ringbuf.c
@@ -770,8 +770,7 @@ schedule_work_return:
 /* Prevent the clearing of the busy-bit from being reordered before the
 * storing of any rb consumer or producer positions.
 */
- smp_mb__before_atomic();
- atomic_set(&rb->busy, 0);
+ atomic_set_release(&rb->busy, 0);
 if (flags & BPF_RB_FORCE_WAKEUP)
 irq_work_queue(&rb->work);

From 42d31dd601fa43b9afdf069d1ba410b2306a4c76 Mon Sep 17 00:00:00 2001
From: Andrii Nakryiko
Date: Sun, 22 Oct 2023 13:57:37 -0700
Subject: [PATCH 38/51] bpf: Improve JEQ/JNE branch taken logic

When determining if an if/else branch will always or never be taken, use
signed range knowledge in addition to the currently used unsigned range
knowledge. If either the signed or unsigned range suggests that the
condition is always/never taken, return the corresponding branch_taken
verdict.

The current use of only the unsigned range for this seems arbitrary and
unnecessarily incomplete. It is possible for *signed* operations to be
performed on a register, which could "invalidate" the unsigned range for
that register. In such a case, branch_taken will be artificially useless,
even if we can still tell that some constant is outside of the register's
value range based on its signed bounds.

veristat-based validation shows zero differences across selftests,
Cilium, and Meta-internal BPF object files.

Signed-off-by: Andrii Nakryiko
Signed-off-by: Daniel Borkmann
Acked-by: Shung-Hsi Yu
Link: https://lore.kernel.org/bpf/20231022205743.72352-2-andrii@kernel.org
---
 kernel/bpf/verifier.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 98f9d0f35931..857d76694517 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -14031,12 +14031,16 @@ static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
 return !!tnum_equals_const(subreg, val);
 else if (val < reg->u32_min_value || val > reg->u32_max_value)
 return 0;
+ else if (sval < reg->s32_min_value || sval > reg->s32_max_value)
+ return 0;
 break;
 case BPF_JNE:
 if (tnum_is_const(subreg))
 return !tnum_equals_const(subreg, val);
 else if (val < reg->u32_min_value || val > reg->u32_max_value)
 return 1;
+ else if (sval < reg->s32_min_value || sval > reg->s32_max_value)
+ return 1;
 break;
 case BPF_JSET:
 if ((~subreg.mask & subreg.value) & val)
@@ -14108,12 +14112,16 @@ static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
 return !!tnum_equals_const(reg->var_off, val);
 else if (val < reg->umin_value || val > reg->umax_value)
 return 0;
+ else if (sval < reg->smin_value || sval > reg->smax_value)
+ return 0;
 break;
 case BPF_JNE:
 if (tnum_is_const(reg->var_off))
 return !tnum_equals_const(reg->var_off, val);
 else if (val < reg->umin_value || val > reg->umax_value)
 return 1;
+ else if (sval < reg->smin_value || sval > reg->smax_value)
+ return 1;
 break;
 case BPF_JSET:
 if ((~reg->var_off.mask & reg->var_off.value) & val)

From 35dfaad7188cdc043fde31709c796f5a692ba2bd Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Tue, 24 Oct 2023 23:48:58 +0200
Subject: [PATCH 39/51] netkit, bpf: Add bpf programmable net device
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This work adds a new, minimal BPF-programmable device called "netkit"
(former PoC code-name "meta") which we recently presented at LSF/MM/BPF.
The core idea is that BPF programs are executed within the driver's xmit
routine, which, in the case of containers/Pods for example, moves BPF
processing closer to the source.
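As an illustration from the BPF side, a minimal program for such a
device could look as follows (a sketch only; the program name is a
placeholder, and the "netkit/primary" section convention as well as the
aliasing of the netkit return codes to the tcx ones are introduced later
in this series):

        // SPDX-License-Identifier: GPL-2.0
        #include <linux/bpf.h>
        #include <bpf/bpf_helpers.h>

        SEC("netkit/primary")
        int nk_xmit_prog(struct __sk_buff *skb)
        {
                /* Runs in the netkit device's xmit routine: TCX_PASS
                 * hands the skb to the peer device, TCX_DROP frees it.
                 */
                return TCX_PASS;
        }

        char LICENSE[] SEC("license") = "GPL";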
One of the goals was that, in the case of Pod egress traffic, BPF
programs can be moved from hostns tcx ingress into the device itself,
providing earlier drop or forward mechanisms. For example, if the BPF
program determines that the skb must be sent out of the node, then a
redirect to the physical device can take place directly without going
through the per-CPU backlog queue. This helps to shift processing for
such traffic from softirq to process context, leading to better
scheduling decisions/performance (see measurements in the slides).

In this initial version, the netkit device ships as a pair, but we plan
to extend this further so it can also operate in single device mode. The
pair comes with a primary and a peer device. Only the primary device,
typically residing in hostns, can manage BPF programs for itself and its
peer. The peer device is designated for containers/Pods and cannot
attach/detach BPF programs. Upon device creation, the user can set the
default policy to 'pass' or 'drop' for the case when no BPF program is
attached. Additionally, the device can be operated in L3 (default) or L2
mode.

The management of BPF programs is done via bpf_mprog, so that
multi-attach is supported right from the beginning, with a similar API
and dependency controls to tcx. For details on the latter, see commit
053c8e1f235d ("bpf: Add generic attach/detach/query API for
multi-progs"). tc BPF compatibility is provided, so that existing
programs can be easily migrated.

Going forward, we plan to use netkit devices in Cilium as the main
device type for connecting Pods. They will be operated in L3 mode in
order to simplify a Pod's neighbor management, and the peer will operate
in default drop mode, so that no traffic leaks out between the time a
Pod is brought up by the CNI plugin and the time programs are attached
by the agent.

Additionally, the programs we attach via tcx on the physical devices use
bpf_redirect_peer() for inbound traffic into the netkit device, hence
the latter also supports the ndo_get_peer_dev callback. Similarly, we
use bpf_redirect_neigh() for the way out, pushing from the netkit peer
to the physical device directly. Also, BIG TCP is supported on the
netkit device. As follow-up work in single device mode, we plan to
convert Cilium's cilium_host/_net devices into a single one.

An extensive test suite for checking device operations and the BPF
program and link management API comes as BPF selftests in this series.

Co-developed-by: Nikolay Aleksandrov
Signed-off-by: Nikolay Aleksandrov
Signed-off-by: Daniel Borkmann
Reviewed-by: Toke Høiland-Jørgensen
Acked-by: Stanislav Fomichev
Acked-by: Martin KaFai Lau
Link: https://github.com/borkmann/iproute2/tree/pr/netkit
Link: http://vger.kernel.org/bpfconf2023_material/tcx_meta_netdev_borkmann.pdf (24ff.)
Link: https://lore.kernel.org/r/20231024214904.29825-2-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- MAINTAINERS | 9 + drivers/net/Kconfig | 9 + drivers/net/Makefile | 1 + drivers/net/netkit.c | 940 +++++++++++++++++++++++++++++++++ include/net/netkit.h | 38 ++ include/uapi/linux/bpf.h | 14 + include/uapi/linux/if_link.h | 24 + kernel/bpf/syscall.c | 30 +- tools/include/uapi/linux/bpf.h | 14 + 9 files changed, 1074 insertions(+), 5 deletions(-) create mode 100644 drivers/net/netkit.c create mode 100644 include/net/netkit.h diff --git a/MAINTAINERS b/MAINTAINERS index ed33b9df8b3d..43be6197e655 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3795,6 +3795,15 @@ L: bpf@vger.kernel.org S: Odd Fixes K: (?:\b|_)bpf(?:\b|_) +BPF [NETKIT] (BPF-programmable network device) +M: Daniel Borkmann +M: Nikolay Aleksandrov +L: bpf@vger.kernel.org +L: netdev@vger.kernel.org +S: Supported +F: drivers/net/netkit.c +F: include/net/netkit.h + BPF [NETWORKING] (struct_ops, reuseport) M: Martin KaFai Lau L: bpf@vger.kernel.org diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 44eeb5d61ba9..af0da4bb429b 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -448,6 +448,15 @@ config NLMON diagnostics, etc. This is mostly intended for developers or support to debug netlink issues. If unsure, say N. +config NETKIT + bool "BPF-programmable network device" + depends on BPF_SYSCALL + help + The netkit device is a virtual networking device where BPF programs + can be attached to the device(s) transmission routine in order to + implement the driver's internal logic. The device can be configured + to operate in L3 or L2 mode. If unsure, say N. + config NET_VRF tristate "Virtual Routing and Forwarding (Lite)" depends on IP_MULTIPLE_TABLES diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 8a83db32509d..7cab36f94782 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO) += mdio.o obj-$(CONFIG_NET) += loopback.o obj-$(CONFIG_NETDEV_LEGACY_INIT) += Space.o obj-$(CONFIG_NETCONSOLE) += netconsole.o +obj-$(CONFIG_NETKIT) += netkit.o obj-y += phy/ obj-y += pse-pd/ obj-y += mdio/ diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c new file mode 100644 index 000000000000..7e484f9fd3ae --- /dev/null +++ b/drivers/net/netkit.c @@ -0,0 +1,940 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Isovalent */ + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define DRV_NAME "netkit" + +struct netkit { + /* Needed in fast-path */ + struct net_device __rcu *peer; + struct bpf_mprog_entry __rcu *active; + enum netkit_action policy; + struct bpf_mprog_bundle bundle; + + /* Needed in slow-path */ + enum netkit_mode mode; + bool primary; + u32 headroom; +}; + +struct netkit_link { + struct bpf_link link; + struct net_device *dev; + u32 location; +}; + +static __always_inline int +netkit_run(const struct bpf_mprog_entry *entry, struct sk_buff *skb, + enum netkit_action ret) +{ + const struct bpf_mprog_fp *fp; + const struct bpf_prog *prog; + + bpf_mprog_foreach_prog(entry, fp, prog) { + bpf_compute_data_pointers(skb); + ret = bpf_prog_run(prog, skb); + if (ret != NETKIT_NEXT) + break; + } + return ret; +} + +static void netkit_prep_forward(struct sk_buff *skb, bool xnet) +{ + skb_scrub_packet(skb, xnet); + skb->priority = 0; + nf_skip_egress(skb, true); +} + +static struct netkit *netkit_priv(const struct net_device *dev) +{ + return netdev_priv(dev); +} + +static netdev_tx_t netkit_xmit(struct 
sk_buff *skb, struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + enum netkit_action ret = READ_ONCE(nk->policy); + netdev_tx_t ret_dev = NET_XMIT_SUCCESS; + const struct bpf_mprog_entry *entry; + struct net_device *peer; + + rcu_read_lock(); + peer = rcu_dereference(nk->peer); + if (unlikely(!peer || !(peer->flags & IFF_UP) || + !pskb_may_pull(skb, ETH_HLEN) || + skb_orphan_frags(skb, GFP_ATOMIC))) + goto drop; + netkit_prep_forward(skb, !net_eq(dev_net(dev), dev_net(peer))); + skb->dev = peer; + entry = rcu_dereference(nk->active); + if (entry) + ret = netkit_run(entry, skb, ret); + switch (ret) { + case NETKIT_NEXT: + case NETKIT_PASS: + skb->protocol = eth_type_trans(skb, skb->dev); + skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); + __netif_rx(skb); + break; + case NETKIT_REDIRECT: + skb_do_redirect(skb); + break; + case NETKIT_DROP: + default: +drop: + kfree_skb(skb); + dev_core_stats_tx_dropped_inc(dev); + ret_dev = NET_XMIT_DROP; + break; + } + rcu_read_unlock(); + return ret_dev; +} + +static int netkit_open(struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + if (!peer) + return -ENOTCONN; + if (peer->flags & IFF_UP) { + netif_carrier_on(dev); + netif_carrier_on(peer); + } + return 0; +} + +static int netkit_close(struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + netif_carrier_off(dev); + if (peer) + netif_carrier_off(peer); + return 0; +} + +static int netkit_get_iflink(const struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer; + int iflink = 0; + + rcu_read_lock(); + peer = rcu_dereference(nk->peer); + if (peer) + iflink = peer->ifindex; + rcu_read_unlock(); + return iflink; +} + +static void netkit_set_multicast(struct net_device *dev) +{ + /* Nothing to do, we receive whatever gets pushed to us! 
*/ +} + +static void netkit_set_headroom(struct net_device *dev, int headroom) +{ + struct netkit *nk = netkit_priv(dev), *nk2; + struct net_device *peer; + + if (headroom < 0) + headroom = NET_SKB_PAD; + + rcu_read_lock(); + peer = rcu_dereference(nk->peer); + if (unlikely(!peer)) + goto out; + + nk2 = netkit_priv(peer); + nk->headroom = headroom; + headroom = max(nk->headroom, nk2->headroom); + + peer->needed_headroom = headroom; + dev->needed_headroom = headroom; +out: + rcu_read_unlock(); +} + +static struct net_device *netkit_peer_dev(struct net_device *dev) +{ + return rcu_dereference(netkit_priv(dev)->peer); +} + +static void netkit_uninit(struct net_device *dev); + +static const struct net_device_ops netkit_netdev_ops = { + .ndo_open = netkit_open, + .ndo_stop = netkit_close, + .ndo_start_xmit = netkit_xmit, + .ndo_set_rx_mode = netkit_set_multicast, + .ndo_set_rx_headroom = netkit_set_headroom, + .ndo_get_iflink = netkit_get_iflink, + .ndo_get_peer_dev = netkit_peer_dev, + .ndo_uninit = netkit_uninit, + .ndo_features_check = passthru_features_check, +}; + +static void netkit_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, DRV_NAME, sizeof(info->driver)); +} + +static const struct ethtool_ops netkit_ethtool_ops = { + .get_drvinfo = netkit_get_drvinfo, +}; + +static void netkit_setup(struct net_device *dev) +{ + static const netdev_features_t netkit_features_hw_vlan = + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_STAG_TX | + NETIF_F_HW_VLAN_STAG_RX; + static const netdev_features_t netkit_features = + netkit_features_hw_vlan | + NETIF_F_SG | + NETIF_F_FRAGLIST | + NETIF_F_HW_CSUM | + NETIF_F_RXCSUM | + NETIF_F_SCTP_CRC | + NETIF_F_HIGHDMA | + NETIF_F_GSO_SOFTWARE | + NETIF_F_GSO_ENCAP_ALL; + + ether_setup(dev); + dev->max_mtu = ETH_MAX_MTU; + + dev->flags |= IFF_NOARP; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + dev->priv_flags |= IFF_PHONY_HEADROOM; + dev->priv_flags |= IFF_NO_QUEUE; + + dev->ethtool_ops = &netkit_ethtool_ops; + dev->netdev_ops = &netkit_netdev_ops; + + dev->features |= netkit_features | NETIF_F_LLTX; + dev->hw_features = netkit_features; + dev->hw_enc_features = netkit_features; + dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE; + dev->vlan_features = dev->features & ~netkit_features_hw_vlan; + + dev->needs_free_netdev = true; + + netif_set_tso_max_size(dev, GSO_MAX_SIZE); +} + +static struct net *netkit_get_link_net(const struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + return peer ? 
dev_net(peer) : dev_net(dev); +} + +static int netkit_check_policy(int policy, struct nlattr *tb, + struct netlink_ext_ack *extack) +{ + switch (policy) { + case NETKIT_PASS: + case NETKIT_DROP: + return 0; + default: + NL_SET_ERR_MSG_ATTR(extack, tb, + "Provided default xmit policy not supported"); + return -EINVAL; + } +} + +static int netkit_check_mode(int mode, struct nlattr *tb, + struct netlink_ext_ack *extack) +{ + switch (mode) { + case NETKIT_L2: + case NETKIT_L3: + return 0; + default: + NL_SET_ERR_MSG_ATTR(extack, tb, + "Provided device mode can only be L2 or L3"); + return -EINVAL; + } +} + +static int netkit_validate(struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct nlattr *attr = tb[IFLA_ADDRESS]; + + if (!attr) + return 0; + NL_SET_ERR_MSG_ATTR(extack, attr, + "Setting Ethernet address is not supported"); + return -EOPNOTSUPP; +} + +static struct rtnl_link_ops netkit_link_ops; + +static int netkit_new_link(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb, *attr; + enum netkit_action default_prim = NETKIT_PASS; + enum netkit_action default_peer = NETKIT_PASS; + enum netkit_mode mode = NETKIT_L3; + unsigned char ifname_assign_type; + struct ifinfomsg *ifmp = NULL; + struct net_device *peer; + char ifname[IFNAMSIZ]; + struct netkit *nk; + struct net *net; + int err; + + if (data) { + if (data[IFLA_NETKIT_MODE]) { + attr = data[IFLA_NETKIT_MODE]; + mode = nla_get_u32(attr); + err = netkit_check_mode(mode, attr, extack); + if (err < 0) + return err; + } + if (data[IFLA_NETKIT_PEER_INFO]) { + attr = data[IFLA_NETKIT_PEER_INFO]; + ifmp = nla_data(attr); + err = rtnl_nla_parse_ifinfomsg(peer_tb, attr, extack); + if (err < 0) + return err; + err = netkit_validate(peer_tb, NULL, extack); + if (err < 0) + return err; + tbp = peer_tb; + } + if (data[IFLA_NETKIT_POLICY]) { + attr = data[IFLA_NETKIT_POLICY]; + default_prim = nla_get_u32(attr); + err = netkit_check_policy(default_prim, attr, extack); + if (err < 0) + return err; + } + if (data[IFLA_NETKIT_PEER_POLICY]) { + attr = data[IFLA_NETKIT_PEER_POLICY]; + default_peer = nla_get_u32(attr); + err = netkit_check_policy(default_peer, attr, extack); + if (err < 0) + return err; + } + } + + if (ifmp && tbp[IFLA_IFNAME]) { + nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); + ifname_assign_type = NET_NAME_USER; + } else { + strscpy(ifname, "nk%d", IFNAMSIZ); + ifname_assign_type = NET_NAME_ENUM; + } + + net = rtnl_link_get_net(src_net, tbp); + if (IS_ERR(net)) + return PTR_ERR(net); + + peer = rtnl_create_link(net, ifname, ifname_assign_type, + &netkit_link_ops, tbp, extack); + if (IS_ERR(peer)) { + put_net(net); + return PTR_ERR(peer); + } + + netif_inherit_tso_max(peer, dev); + + if (mode == NETKIT_L2) + eth_hw_addr_random(peer); + if (ifmp && dev->ifindex) + peer->ifindex = ifmp->ifi_index; + + nk = netkit_priv(peer); + nk->primary = false; + nk->policy = default_peer; + nk->mode = mode; + bpf_mprog_bundle_init(&nk->bundle); + RCU_INIT_POINTER(nk->active, NULL); + RCU_INIT_POINTER(nk->peer, NULL); + + err = register_netdevice(peer); + put_net(net); + if (err < 0) + goto err_register_peer; + netif_carrier_off(peer); + if (mode == NETKIT_L2) + dev_change_flags(peer, peer->flags & ~IFF_NOARP, NULL); + + err = rtnl_configure_link(peer, NULL, 0, NULL); + if (err < 0) + goto err_configure_peer; + + if (mode == NETKIT_L2) + eth_hw_addr_random(dev); + if (tb[IFLA_IFNAME]) + 
nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); + else + strscpy(dev->name, "nk%d", IFNAMSIZ); + + nk = netkit_priv(dev); + nk->primary = true; + nk->policy = default_prim; + nk->mode = mode; + bpf_mprog_bundle_init(&nk->bundle); + RCU_INIT_POINTER(nk->active, NULL); + RCU_INIT_POINTER(nk->peer, NULL); + + err = register_netdevice(dev); + if (err < 0) + goto err_configure_peer; + netif_carrier_off(dev); + if (mode == NETKIT_L2) + dev_change_flags(dev, dev->flags & ~IFF_NOARP, NULL); + + rcu_assign_pointer(netkit_priv(dev)->peer, peer); + rcu_assign_pointer(netkit_priv(peer)->peer, dev); + return 0; +err_configure_peer: + unregister_netdevice(peer); + return err; +err_register_peer: + free_netdev(peer); + return err; +} + +static struct bpf_mprog_entry *netkit_entry_fetch(struct net_device *dev, + bool bundle_fallback) +{ + struct netkit *nk = netkit_priv(dev); + struct bpf_mprog_entry *entry; + + ASSERT_RTNL(); + entry = rcu_dereference_rtnl(nk->active); + if (entry) + return entry; + if (bundle_fallback) + return &nk->bundle.a; + return NULL; +} + +static void netkit_entry_update(struct net_device *dev, + struct bpf_mprog_entry *entry) +{ + struct netkit *nk = netkit_priv(dev); + + ASSERT_RTNL(); + rcu_assign_pointer(nk->active, entry); +} + +static void netkit_entry_sync(void) +{ + synchronize_rcu(); +} + +static struct net_device *netkit_dev_fetch(struct net *net, u32 ifindex, u32 which) +{ + struct net_device *dev; + struct netkit *nk; + + ASSERT_RTNL(); + + switch (which) { + case BPF_NETKIT_PRIMARY: + case BPF_NETKIT_PEER: + break; + default: + return ERR_PTR(-EINVAL); + } + + dev = __dev_get_by_index(net, ifindex); + if (!dev) + return ERR_PTR(-ENODEV); + if (dev->netdev_ops != &netkit_netdev_ops) + return ERR_PTR(-ENXIO); + + nk = netkit_priv(dev); + if (!nk->primary) + return ERR_PTR(-EACCES); + if (which == BPF_NETKIT_PEER) { + dev = rcu_dereference_rtnl(nk->peer); + if (!dev) + return ERR_PTR(-ENODEV); + } + return dev; +} + +int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_mprog_entry *entry, *entry_new; + struct bpf_prog *replace_prog = NULL; + struct net_device *dev; + int ret; + + rtnl_lock(); + dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex, + attr->attach_type); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + entry = netkit_entry_fetch(dev, true); + if (attr->attach_flags & BPF_F_REPLACE) { + replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, + prog->type); + if (IS_ERR(replace_prog)) { + ret = PTR_ERR(replace_prog); + replace_prog = NULL; + goto out; + } + } + ret = bpf_mprog_attach(entry, &entry_new, prog, NULL, replace_prog, + attr->attach_flags, attr->relative_fd, + attr->expected_revision); + if (!ret) { + if (entry != entry_new) { + netkit_entry_update(dev, entry_new); + netkit_entry_sync(); + } + bpf_mprog_commit(entry); + } +out: + if (replace_prog) + bpf_prog_put(replace_prog); + rtnl_unlock(); + return ret; +} + +int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_mprog_entry *entry, *entry_new; + struct net_device *dev; + int ret; + + rtnl_lock(); + dev = netkit_dev_fetch(current->nsproxy->net_ns, attr->target_ifindex, + attr->attach_type); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + entry = netkit_entry_fetch(dev, false); + if (!entry) { + ret = -ENOENT; + goto out; + } + ret = bpf_mprog_detach(entry, &entry_new, prog, NULL, attr->attach_flags, + attr->relative_fd, attr->expected_revision); + if (!ret) { + if 
(!bpf_mprog_total(entry_new)) + entry_new = NULL; + netkit_entry_update(dev, entry_new); + netkit_entry_sync(); + bpf_mprog_commit(entry); + } +out: + rtnl_unlock(); + return ret; +} + +int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr) +{ + struct net_device *dev; + int ret; + + rtnl_lock(); + dev = netkit_dev_fetch(current->nsproxy->net_ns, + attr->query.target_ifindex, + attr->query.attach_type); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + ret = bpf_mprog_query(attr, uattr, netkit_entry_fetch(dev, false)); +out: + rtnl_unlock(); + return ret; +} + +static struct netkit_link *netkit_link(const struct bpf_link *link) +{ + return container_of(link, struct netkit_link, link); +} + +static int netkit_link_prog_attach(struct bpf_link *link, u32 flags, + u32 id_or_fd, u64 revision) +{ + struct netkit_link *nkl = netkit_link(link); + struct bpf_mprog_entry *entry, *entry_new; + struct net_device *dev = nkl->dev; + int ret; + + ASSERT_RTNL(); + entry = netkit_entry_fetch(dev, true); + ret = bpf_mprog_attach(entry, &entry_new, link->prog, link, NULL, flags, + id_or_fd, revision); + if (!ret) { + if (entry != entry_new) { + netkit_entry_update(dev, entry_new); + netkit_entry_sync(); + } + bpf_mprog_commit(entry); + } + return ret; +} + +static void netkit_link_release(struct bpf_link *link) +{ + struct netkit_link *nkl = netkit_link(link); + struct bpf_mprog_entry *entry, *entry_new; + struct net_device *dev; + int ret = 0; + + rtnl_lock(); + dev = nkl->dev; + if (!dev) + goto out; + entry = netkit_entry_fetch(dev, false); + if (!entry) { + ret = -ENOENT; + goto out; + } + ret = bpf_mprog_detach(entry, &entry_new, link->prog, link, 0, 0, 0); + if (!ret) { + if (!bpf_mprog_total(entry_new)) + entry_new = NULL; + netkit_entry_update(dev, entry_new); + netkit_entry_sync(); + bpf_mprog_commit(entry); + nkl->dev = NULL; + } +out: + WARN_ON_ONCE(ret); + rtnl_unlock(); +} + +static int netkit_link_update(struct bpf_link *link, struct bpf_prog *nprog, + struct bpf_prog *oprog) +{ + struct netkit_link *nkl = netkit_link(link); + struct bpf_mprog_entry *entry, *entry_new; + struct net_device *dev; + int ret = 0; + + rtnl_lock(); + dev = nkl->dev; + if (!dev) { + ret = -ENOLINK; + goto out; + } + if (oprog && link->prog != oprog) { + ret = -EPERM; + goto out; + } + oprog = link->prog; + if (oprog == nprog) { + bpf_prog_put(nprog); + goto out; + } + entry = netkit_entry_fetch(dev, false); + if (!entry) { + ret = -ENOENT; + goto out; + } + ret = bpf_mprog_attach(entry, &entry_new, nprog, link, oprog, + BPF_F_REPLACE | BPF_F_ID, + link->prog->aux->id, 0); + if (!ret) { + WARN_ON_ONCE(entry != entry_new); + oprog = xchg(&link->prog, nprog); + bpf_prog_put(oprog); + bpf_mprog_commit(entry); + } +out: + rtnl_unlock(); + return ret; +} + +static void netkit_link_dealloc(struct bpf_link *link) +{ + kfree(netkit_link(link)); +} + +static void netkit_link_fdinfo(const struct bpf_link *link, struct seq_file *seq) +{ + const struct netkit_link *nkl = netkit_link(link); + u32 ifindex = 0; + + rtnl_lock(); + if (nkl->dev) + ifindex = nkl->dev->ifindex; + rtnl_unlock(); + + seq_printf(seq, "ifindex:\t%u\n", ifindex); + seq_printf(seq, "attach_type:\t%u (%s)\n", + nkl->location, + nkl->location == BPF_NETKIT_PRIMARY ? 
"primary" : "peer"); +} + +static int netkit_link_fill_info(const struct bpf_link *link, + struct bpf_link_info *info) +{ + const struct netkit_link *nkl = netkit_link(link); + u32 ifindex = 0; + + rtnl_lock(); + if (nkl->dev) + ifindex = nkl->dev->ifindex; + rtnl_unlock(); + + info->netkit.ifindex = ifindex; + info->netkit.attach_type = nkl->location; + return 0; +} + +static int netkit_link_detach(struct bpf_link *link) +{ + netkit_link_release(link); + return 0; +} + +static const struct bpf_link_ops netkit_link_lops = { + .release = netkit_link_release, + .detach = netkit_link_detach, + .dealloc = netkit_link_dealloc, + .update_prog = netkit_link_update, + .show_fdinfo = netkit_link_fdinfo, + .fill_link_info = netkit_link_fill_info, +}; + +static int netkit_link_init(struct netkit_link *nkl, + struct bpf_link_primer *link_primer, + const union bpf_attr *attr, + struct net_device *dev, + struct bpf_prog *prog) +{ + bpf_link_init(&nkl->link, BPF_LINK_TYPE_NETKIT, + &netkit_link_lops, prog); + nkl->location = attr->link_create.attach_type; + nkl->dev = dev; + return bpf_link_prime(&nkl->link, link_primer); +} + +int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog) +{ + struct bpf_link_primer link_primer; + struct netkit_link *nkl; + struct net_device *dev; + int ret; + + rtnl_lock(); + dev = netkit_dev_fetch(current->nsproxy->net_ns, + attr->link_create.target_ifindex, + attr->link_create.attach_type); + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + goto out; + } + nkl = kzalloc(sizeof(*nkl), GFP_KERNEL_ACCOUNT); + if (!nkl) { + ret = -ENOMEM; + goto out; + } + ret = netkit_link_init(nkl, &link_primer, attr, dev, prog); + if (ret) { + kfree(nkl); + goto out; + } + ret = netkit_link_prog_attach(&nkl->link, + attr->link_create.flags, + attr->link_create.netkit.relative_fd, + attr->link_create.netkit.expected_revision); + if (ret) { + nkl->dev = NULL; + bpf_link_cleanup(&link_primer); + goto out; + } + ret = bpf_link_settle(&link_primer); +out: + rtnl_unlock(); + return ret; +} + +static void netkit_release_all(struct net_device *dev) +{ + struct bpf_mprog_entry *entry; + struct bpf_tuple tuple = {}; + struct bpf_mprog_fp *fp; + struct bpf_mprog_cp *cp; + + entry = netkit_entry_fetch(dev, false); + if (!entry) + return; + netkit_entry_update(dev, NULL); + netkit_entry_sync(); + bpf_mprog_foreach_tuple(entry, fp, cp, tuple) { + if (tuple.link) + netkit_link(tuple.link)->dev = NULL; + else + bpf_prog_put(tuple.prog); + } +} + +static void netkit_uninit(struct net_device *dev) +{ + netkit_release_all(dev); +} + +static void netkit_del_link(struct net_device *dev, struct list_head *head) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + RCU_INIT_POINTER(nk->peer, NULL); + unregister_netdevice_queue(dev, head); + if (peer) { + nk = netkit_priv(peer); + RCU_INIT_POINTER(nk->peer, NULL); + unregister_netdevice_queue(peer, head); + } +} + +static int netkit_change_link(struct net_device *dev, struct nlattr *tb[], + struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + enum netkit_action policy; + struct nlattr *attr; + int err; + + if (!nk->primary) { + NL_SET_ERR_MSG(extack, + "netkit link settings can be changed only through the primary device"); + return -EACCES; + } + + if (data[IFLA_NETKIT_MODE]) { + NL_SET_ERR_MSG_ATTR(extack, data[IFLA_NETKIT_MODE], + "netkit link operating mode cannot be changed after device creation"); 
+ return -EACCES; + } + + if (data[IFLA_NETKIT_POLICY]) { + attr = data[IFLA_NETKIT_POLICY]; + policy = nla_get_u32(attr); + err = netkit_check_policy(policy, attr, extack); + if (err) + return err; + WRITE_ONCE(nk->policy, policy); + } + + if (data[IFLA_NETKIT_PEER_POLICY]) { + err = -EOPNOTSUPP; + attr = data[IFLA_NETKIT_PEER_POLICY]; + policy = nla_get_u32(attr); + if (peer) + err = netkit_check_policy(policy, attr, extack); + if (err) + return err; + nk = netkit_priv(peer); + WRITE_ONCE(nk->policy, policy); + } + + return 0; +} + +static size_t netkit_get_size(const struct net_device *dev) +{ + return nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_POLICY */ + nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_PEER_POLICY */ + nla_total_size(sizeof(u8)) + /* IFLA_NETKIT_PRIMARY */ + nla_total_size(sizeof(u32)) + /* IFLA_NETKIT_MODE */ + 0; +} + +static int netkit_fill_info(struct sk_buff *skb, const struct net_device *dev) +{ + struct netkit *nk = netkit_priv(dev); + struct net_device *peer = rtnl_dereference(nk->peer); + + if (nla_put_u8(skb, IFLA_NETKIT_PRIMARY, nk->primary)) + return -EMSGSIZE; + if (nla_put_u32(skb, IFLA_NETKIT_POLICY, nk->policy)) + return -EMSGSIZE; + if (nla_put_u32(skb, IFLA_NETKIT_MODE, nk->mode)) + return -EMSGSIZE; + + if (peer) { + nk = netkit_priv(peer); + if (nla_put_u32(skb, IFLA_NETKIT_PEER_POLICY, nk->policy)) + return -EMSGSIZE; + } + + return 0; +} + +static const struct nla_policy netkit_policy[IFLA_NETKIT_MAX + 1] = { + [IFLA_NETKIT_PEER_INFO] = { .len = sizeof(struct ifinfomsg) }, + [IFLA_NETKIT_POLICY] = { .type = NLA_U32 }, + [IFLA_NETKIT_MODE] = { .type = NLA_U32 }, + [IFLA_NETKIT_PEER_POLICY] = { .type = NLA_U32 }, + [IFLA_NETKIT_PRIMARY] = { .type = NLA_REJECT, + .reject_message = "Primary attribute is read-only" }, +}; + +static struct rtnl_link_ops netkit_link_ops = { + .kind = DRV_NAME, + .priv_size = sizeof(struct netkit), + .setup = netkit_setup, + .newlink = netkit_new_link, + .dellink = netkit_del_link, + .changelink = netkit_change_link, + .get_link_net = netkit_get_link_net, + .get_size = netkit_get_size, + .fill_info = netkit_fill_info, + .policy = netkit_policy, + .validate = netkit_validate, + .maxtype = IFLA_NETKIT_MAX, +}; + +static __init int netkit_init(void) +{ + BUILD_BUG_ON((int)NETKIT_NEXT != (int)TCX_NEXT || + (int)NETKIT_PASS != (int)TCX_PASS || + (int)NETKIT_DROP != (int)TCX_DROP || + (int)NETKIT_REDIRECT != (int)TCX_REDIRECT); + + return rtnl_link_register(&netkit_link_ops); +} + +static __exit void netkit_exit(void) +{ + rtnl_link_unregister(&netkit_link_ops); +} + +module_init(netkit_init); +module_exit(netkit_exit); + +MODULE_DESCRIPTION("BPF-programmable network device"); +MODULE_AUTHOR("Daniel Borkmann "); +MODULE_AUTHOR("Nikolay Aleksandrov "); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_RTNL_LINK(DRV_NAME); diff --git a/include/net/netkit.h b/include/net/netkit.h new file mode 100644 index 000000000000..0ba2e6b847ca --- /dev/null +++ b/include/net/netkit.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2023 Isovalent */ +#ifndef __NET_NETKIT_H +#define __NET_NETKIT_H + +#include + +#ifdef CONFIG_NETKIT +int netkit_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int netkit_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); +int netkit_prog_detach(const union bpf_attr *attr, struct bpf_prog *prog); +int netkit_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr); +#else +static inline int netkit_prog_attach(const union bpf_attr *attr, + struct bpf_prog 
*prog) +{ + return -EINVAL; +} + +static inline int netkit_link_attach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int netkit_prog_detach(const union bpf_attr *attr, + struct bpf_prog *prog) +{ + return -EINVAL; +} + +static inline int netkit_prog_query(const union bpf_attr *attr, + union bpf_attr __user *uattr) +{ + return -EINVAL; +} +#endif /* CONFIG_NETKIT */ +#endif /* __NET_NETKIT_H */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 7ba61b75bc0e..0f6cdf52b1da 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1052,6 +1052,8 @@ enum bpf_attach_type { BPF_CGROUP_UNIX_RECVMSG, BPF_CGROUP_UNIX_GETPEERNAME, BPF_CGROUP_UNIX_GETSOCKNAME, + BPF_NETKIT_PRIMARY, + BPF_NETKIT_PEER, __MAX_BPF_ATTACH_TYPE }; @@ -1071,6 +1073,7 @@ enum bpf_link_type { BPF_LINK_TYPE_NETFILTER = 10, BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, + BPF_LINK_TYPE_NETKIT = 13, MAX_BPF_LINK_TYPE, }; @@ -1656,6 +1659,13 @@ union bpf_attr { __u32 flags; __u32 pid; } uprobe_multi; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + } netkit; }; } link_create; @@ -6576,6 +6586,10 @@ struct bpf_link_info { __u32 ifindex; __u32 attach_type; } tcx; + struct { + __u32 ifindex; + __u32 attach_type; + } netkit; }; } __attribute__((aligned(8))); diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index fac351a93aed..a0aa05a28cf2 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -756,6 +756,30 @@ struct tunnel_msg { __u32 ifindex; }; +/* netkit section */ +enum netkit_action { + NETKIT_NEXT = -1, + NETKIT_PASS = 0, + NETKIT_DROP = 2, + NETKIT_REDIRECT = 7, +}; + +enum netkit_mode { + NETKIT_L2, + NETKIT_L3, +}; + +enum { + IFLA_NETKIT_UNSPEC, + IFLA_NETKIT_PEER_INFO, + IFLA_NETKIT_PRIMARY, + IFLA_NETKIT_POLICY, + IFLA_NETKIT_PEER_POLICY, + IFLA_NETKIT_MODE, + __IFLA_NETKIT_MAX, +}; +#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1) + /* VXLAN section */ /* include statistics in the dump */ diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index a9b2cb500bf7..0ed286b8a0f0 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -35,8 +35,9 @@ #include #include #include -#include +#include +#include #include #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \ @@ -3730,6 +3731,8 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type) return BPF_PROG_TYPE_LSM; case BPF_TCX_INGRESS: case BPF_TCX_EGRESS: + case BPF_NETKIT_PRIMARY: + case BPF_NETKIT_PEER: return BPF_PROG_TYPE_SCHED_CLS; default: return BPF_PROG_TYPE_UNSPEC; @@ -3781,7 +3784,9 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog, return 0; case BPF_PROG_TYPE_SCHED_CLS: if (attach_type != BPF_TCX_INGRESS && - attach_type != BPF_TCX_EGRESS) + attach_type != BPF_TCX_EGRESS && + attach_type != BPF_NETKIT_PRIMARY && + attach_type != BPF_NETKIT_PEER) return -EINVAL; return 0; default: @@ -3864,7 +3869,11 @@ static int bpf_prog_attach(const union bpf_attr *attr) ret = cgroup_bpf_prog_attach(attr, ptype, prog); break; case BPF_PROG_TYPE_SCHED_CLS: - ret = tcx_prog_attach(attr, prog); + if (attr->attach_type == BPF_TCX_INGRESS || + attr->attach_type == BPF_TCX_EGRESS) + ret = tcx_prog_attach(attr, prog); + else + ret = netkit_prog_attach(attr, prog); break; default: ret = -EINVAL; @@ -3925,7 +3934,11 @@ static int bpf_prog_detach(const union bpf_attr *attr) ret = cgroup_bpf_prog_detach(attr, ptype); break; case 
BPF_PROG_TYPE_SCHED_CLS: - ret = tcx_prog_detach(attr, prog); + if (attr->attach_type == BPF_TCX_INGRESS || + attr->attach_type == BPF_TCX_EGRESS) + ret = tcx_prog_detach(attr, prog); + else + ret = netkit_prog_detach(attr, prog); break; default: ret = -EINVAL; @@ -3992,6 +4005,9 @@ static int bpf_prog_query(const union bpf_attr *attr, case BPF_TCX_INGRESS: case BPF_TCX_EGRESS: return tcx_prog_query(attr, uattr); + case BPF_NETKIT_PRIMARY: + case BPF_NETKIT_PEER: + return netkit_prog_query(attr, uattr); default: return -EINVAL; } @@ -4973,7 +4989,11 @@ static int link_create(union bpf_attr *attr, bpfptr_t uattr) ret = bpf_xdp_link_attach(attr, prog); break; case BPF_PROG_TYPE_SCHED_CLS: - ret = tcx_link_attach(attr, prog); + if (attr->link_create.attach_type == BPF_TCX_INGRESS || + attr->link_create.attach_type == BPF_TCX_EGRESS) + ret = tcx_link_attach(attr, prog); + else + ret = netkit_link_attach(attr, prog); break; case BPF_PROG_TYPE_NETFILTER: ret = bpf_nf_link_attach(attr, prog); diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 7ba61b75bc0e..0f6cdf52b1da 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1052,6 +1052,8 @@ enum bpf_attach_type { BPF_CGROUP_UNIX_RECVMSG, BPF_CGROUP_UNIX_GETPEERNAME, BPF_CGROUP_UNIX_GETSOCKNAME, + BPF_NETKIT_PRIMARY, + BPF_NETKIT_PEER, __MAX_BPF_ATTACH_TYPE }; @@ -1071,6 +1073,7 @@ enum bpf_link_type { BPF_LINK_TYPE_NETFILTER = 10, BPF_LINK_TYPE_TCX = 11, BPF_LINK_TYPE_UPROBE_MULTI = 12, + BPF_LINK_TYPE_NETKIT = 13, MAX_BPF_LINK_TYPE, }; @@ -1656,6 +1659,13 @@ union bpf_attr { __u32 flags; __u32 pid; } uprobe_multi; + struct { + union { + __u32 relative_fd; + __u32 relative_id; + }; + __u64 expected_revision; + } netkit; }; } link_create; @@ -6576,6 +6586,10 @@ struct bpf_link_info { __u32 ifindex; __u32 attach_type; } tcx; + struct { + __u32 ifindex; + __u32 attach_type; + } netkit; }; } __attribute__((aligned(8))); From 5c1b994de4be8a27afa3281be2ff58b38e8bc50c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Oct 2023 23:48:59 +0200 Subject: [PATCH 40/51] tools: Sync if_link uapi header Sync if_link uapi header to the latest version as we need the refresher in tooling for netkit device. Given it's been a while since the last sync and the diff is fairly big, it has been done as its own commit. Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20231024214904.29825-3-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- tools/include/uapi/linux/if_link.h | 141 +++++++++++++++++++++++++++++ 1 file changed, 141 insertions(+) diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 39e659c83cfd..a0aa05a28cf2 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -211,6 +211,9 @@ struct rtnl_link_stats { * @rx_nohandler: Number of packets received on the interface * but dropped by the networking stack because the device is * not designated to receive packets (e.g. backup link in a bond). + * + * @rx_otherhost_dropped: Number of packets dropped due to mismatch + * in destination MAC address. */ struct rtnl_link_stats64 { __u64 rx_packets; @@ -243,6 +246,23 @@ struct rtnl_link_stats64 { __u64 rx_compressed; __u64 tx_compressed; __u64 rx_nohandler; + + __u64 rx_otherhost_dropped; +}; + +/* Subset of link stats useful for in-HW collection. Meaning of the fields is as + * for struct rtnl_link_stats64. 
+ */ +struct rtnl_hw_stats64 { + __u64 rx_packets; + __u64 tx_packets; + __u64 rx_bytes; + __u64 tx_bytes; + __u64 rx_errors; + __u64 tx_errors; + __u64 rx_dropped; + __u64 tx_dropped; + __u64 multicast; }; /* The struct should be in sync with struct ifmap */ @@ -350,7 +370,13 @@ enum { IFLA_GRO_MAX_SIZE, IFLA_TSO_MAX_SIZE, IFLA_TSO_MAX_SEGS, + IFLA_ALLMULTI, /* Allmulti count: > 0 means acts ALLMULTI */ + IFLA_DEVLINK_PORT, + + IFLA_GSO_IPV4_MAX_SIZE, + IFLA_GRO_IPV4_MAX_SIZE, + IFLA_DPLL_PIN, __IFLA_MAX }; @@ -539,6 +565,12 @@ enum { IFLA_BRPORT_MRP_IN_OPEN, IFLA_BRPORT_MCAST_EHT_HOSTS_LIMIT, IFLA_BRPORT_MCAST_EHT_HOSTS_CNT, + IFLA_BRPORT_LOCKED, + IFLA_BRPORT_MAB, + IFLA_BRPORT_MCAST_N_GROUPS, + IFLA_BRPORT_MCAST_MAX_GROUPS, + IFLA_BRPORT_NEIGH_VLAN_SUPPRESS, + IFLA_BRPORT_BACKUP_NHID, __IFLA_BRPORT_MAX }; #define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1) @@ -716,7 +748,79 @@ enum ipvlan_mode { #define IPVLAN_F_PRIVATE 0x01 #define IPVLAN_F_VEPA 0x02 +/* Tunnel RTM header */ +struct tunnel_msg { + __u8 family; + __u8 flags; + __u16 reserved2; + __u32 ifindex; +}; + +/* netkit section */ +enum netkit_action { + NETKIT_NEXT = -1, + NETKIT_PASS = 0, + NETKIT_DROP = 2, + NETKIT_REDIRECT = 7, +}; + +enum netkit_mode { + NETKIT_L2, + NETKIT_L3, +}; + +enum { + IFLA_NETKIT_UNSPEC, + IFLA_NETKIT_PEER_INFO, + IFLA_NETKIT_PRIMARY, + IFLA_NETKIT_POLICY, + IFLA_NETKIT_PEER_POLICY, + IFLA_NETKIT_MODE, + __IFLA_NETKIT_MAX, +}; +#define IFLA_NETKIT_MAX (__IFLA_NETKIT_MAX - 1) + /* VXLAN section */ + +/* include statistics in the dump */ +#define TUNNEL_MSG_FLAG_STATS 0x01 + +#define TUNNEL_MSG_VALID_USER_FLAGS TUNNEL_MSG_FLAG_STATS + +/* Embedded inside VXLAN_VNIFILTER_ENTRY_STATS */ +enum { + VNIFILTER_ENTRY_STATS_UNSPEC, + VNIFILTER_ENTRY_STATS_RX_BYTES, + VNIFILTER_ENTRY_STATS_RX_PKTS, + VNIFILTER_ENTRY_STATS_RX_DROPS, + VNIFILTER_ENTRY_STATS_RX_ERRORS, + VNIFILTER_ENTRY_STATS_TX_BYTES, + VNIFILTER_ENTRY_STATS_TX_PKTS, + VNIFILTER_ENTRY_STATS_TX_DROPS, + VNIFILTER_ENTRY_STATS_TX_ERRORS, + VNIFILTER_ENTRY_STATS_PAD, + __VNIFILTER_ENTRY_STATS_MAX +}; +#define VNIFILTER_ENTRY_STATS_MAX (__VNIFILTER_ENTRY_STATS_MAX - 1) + +enum { + VXLAN_VNIFILTER_ENTRY_UNSPEC, + VXLAN_VNIFILTER_ENTRY_START, + VXLAN_VNIFILTER_ENTRY_END, + VXLAN_VNIFILTER_ENTRY_GROUP, + VXLAN_VNIFILTER_ENTRY_GROUP6, + VXLAN_VNIFILTER_ENTRY_STATS, + __VXLAN_VNIFILTER_ENTRY_MAX +}; +#define VXLAN_VNIFILTER_ENTRY_MAX (__VXLAN_VNIFILTER_ENTRY_MAX - 1) + +enum { + VXLAN_VNIFILTER_UNSPEC, + VXLAN_VNIFILTER_ENTRY, + __VXLAN_VNIFILTER_MAX +}; +#define VXLAN_VNIFILTER_MAX (__VXLAN_VNIFILTER_MAX - 1) + enum { IFLA_VXLAN_UNSPEC, IFLA_VXLAN_ID, @@ -748,6 +852,8 @@ enum { IFLA_VXLAN_GPE, IFLA_VXLAN_TTL_INHERIT, IFLA_VXLAN_DF, + IFLA_VXLAN_VNIFILTER, /* only applicable with COLLECT_METADATA mode */ + IFLA_VXLAN_LOCALBYPASS, __IFLA_VXLAN_MAX }; #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1) @@ -781,6 +887,7 @@ enum { IFLA_GENEVE_LABEL, IFLA_GENEVE_TTL_INHERIT, IFLA_GENEVE_DF, + IFLA_GENEVE_INNER_PROTO_INHERIT, __IFLA_GENEVE_MAX }; #define IFLA_GENEVE_MAX (__IFLA_GENEVE_MAX - 1) @@ -826,6 +933,8 @@ enum { IFLA_GTP_FD1, IFLA_GTP_PDP_HASHSIZE, IFLA_GTP_ROLE, + IFLA_GTP_CREATE_SOCKETS, + IFLA_GTP_RESTART_COUNT, __IFLA_GTP_MAX, }; #define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1) @@ -1162,6 +1271,17 @@ enum { #define IFLA_STATS_FILTER_BIT(ATTR) (1 << (ATTR - 1)) +enum { + IFLA_STATS_GETSET_UNSPEC, + IFLA_STATS_GET_FILTERS, /* Nest of IFLA_STATS_LINK_xxx, each a u32 with + * a filter mask for the corresponding group. 
+ */ + IFLA_STATS_SET_OFFLOAD_XSTATS_L3_STATS, /* 0 or 1 as u8 */ + __IFLA_STATS_GETSET_MAX, +}; + +#define IFLA_STATS_GETSET_MAX (__IFLA_STATS_GETSET_MAX - 1) + /* These are embedded into IFLA_STATS_LINK_XSTATS: * [IFLA_STATS_LINK_XSTATS] * -> [LINK_XSTATS_TYPE_xxx] @@ -1179,10 +1299,21 @@ enum { enum { IFLA_OFFLOAD_XSTATS_UNSPEC, IFLA_OFFLOAD_XSTATS_CPU_HIT, /* struct rtnl_link_stats64 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO, /* HW stats info. A nest */ + IFLA_OFFLOAD_XSTATS_L3_STATS, /* struct rtnl_hw_stats64 */ __IFLA_OFFLOAD_XSTATS_MAX }; #define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1) +enum { + IFLA_OFFLOAD_XSTATS_HW_S_INFO_UNSPEC, + IFLA_OFFLOAD_XSTATS_HW_S_INFO_REQUEST, /* u8 */ + IFLA_OFFLOAD_XSTATS_HW_S_INFO_USED, /* u8 */ + __IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX, +}; +#define IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX \ + (__IFLA_OFFLOAD_XSTATS_HW_S_INFO_MAX - 1) + /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) @@ -1281,4 +1412,14 @@ enum { #define IFLA_MCTP_MAX (__IFLA_MCTP_MAX - 1) +/* DSA section */ + +enum { + IFLA_DSA_UNSPEC, + IFLA_DSA_MASTER, + __IFLA_DSA_MAX, +}; + +#define IFLA_DSA_MAX (__IFLA_DSA_MAX - 1) + #endif /* _UAPI_LINUX_IF_LINK_H */ From 05c31b4ab20527c4d1695130aaecc54ef59a0e54 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Oct 2023 23:49:00 +0200 Subject: [PATCH 41/51] libbpf: Add link-based API for netkit This adds bpf_program__attach_netkit() API to libbpf. Overall it is very similar to tcx. The API looks as following: LIBBPF_API struct bpf_link * bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex, const struct bpf_netkit_opts *opts); The struct bpf_netkit_opts is done in similar way as struct bpf_tcx_opts for supporting bpf_mprog control parameters. The attach location for the primary and peer device is derived from the program section "netkit/primary" and "netkit/peer", respectively. 
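For example, attaching a program from the "netkit/primary" section to
the primary device could look as follows (a sketch; "skel" and "ifindex"
are assumed to come from the surrounding application or test code, and
"tc1" is a placeholder program name):

        LIBBPF_OPTS(bpf_netkit_opts, optl);
        struct bpf_link *link;

        link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl);
        if (!link)
                return -errno; /* libbpf sets errno on failure */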
Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20231024214904.29825-4-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- tools/lib/bpf/bpf.c | 16 ++++++++++++++++ tools/lib/bpf/bpf.h | 5 +++++ tools/lib/bpf/libbpf.c | 39 +++++++++++++++++++++++++++++++++++++++ tools/lib/bpf/libbpf.h | 15 +++++++++++++++ tools/lib/bpf/libbpf.map | 1 + 5 files changed, 76 insertions(+) diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c index b0f1913763a3..9dc9625651dc 100644 --- a/tools/lib/bpf/bpf.c +++ b/tools/lib/bpf/bpf.c @@ -810,6 +810,22 @@ int bpf_link_create(int prog_fd, int target_fd, if (!OPTS_ZEROED(opts, tcx)) return libbpf_err(-EINVAL); break; + case BPF_NETKIT_PRIMARY: + case BPF_NETKIT_PEER: + relative_fd = OPTS_GET(opts, netkit.relative_fd, 0); + relative_id = OPTS_GET(opts, netkit.relative_id, 0); + if (relative_fd && relative_id) + return libbpf_err(-EINVAL); + if (relative_id) { + attr.link_create.netkit.relative_id = relative_id; + attr.link_create.flags |= BPF_F_ID; + } else { + attr.link_create.netkit.relative_fd = relative_fd; + } + attr.link_create.netkit.expected_revision = OPTS_GET(opts, netkit.expected_revision, 0); + if (!OPTS_ZEROED(opts, netkit)) + return libbpf_err(-EINVAL); + break; default: if (!OPTS_ZEROED(opts, flags)) return libbpf_err(-EINVAL); diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h index 74c2887cfd24..d0f53772bdc0 100644 --- a/tools/lib/bpf/bpf.h +++ b/tools/lib/bpf/bpf.h @@ -415,6 +415,11 @@ struct bpf_link_create_opts { __u32 relative_id; __u64 expected_revision; } tcx; + struct { + __u32 relative_fd; + __u32 relative_id; + __u64 expected_revision; + } netkit; }; size_t :0; }; diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index a295f6acf98f..e067be95da3c 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -126,6 +126,8 @@ static const char * const attach_type_name[] = { [BPF_TCX_INGRESS] = "tcx_ingress", [BPF_TCX_EGRESS] = "tcx_egress", [BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi", + [BPF_NETKIT_PRIMARY] = "netkit_primary", + [BPF_NETKIT_PEER] = "netkit_peer", }; static const char * const link_type_name[] = { @@ -142,6 +144,7 @@ static const char * const link_type_name[] = { [BPF_LINK_TYPE_NETFILTER] = "netfilter", [BPF_LINK_TYPE_TCX] = "tcx", [BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi", + [BPF_LINK_TYPE_NETKIT] = "netkit", }; static const char * const map_type_name[] = { @@ -8915,6 +8918,8 @@ static const struct bpf_sec_def section_defs[] = { SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */ SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */ SEC_DEF("action", SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */ + SEC_DEF("netkit/primary", SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE), + SEC_DEF("netkit/peer", SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE), SEC_DEF("tracepoint+", TRACEPOINT, 0, SEC_NONE, attach_tp), SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp), SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp), @@ -12126,6 +12131,40 @@ bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex, return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts); } +struct bpf_link * +bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex, + const struct bpf_netkit_opts *opts) +{ + LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); + __u32 relative_id; + int relative_fd; + + if (!OPTS_VALID(opts, bpf_netkit_opts)) + return libbpf_err_ptr(-EINVAL); + + relative_id = 
OPTS_GET(opts, relative_id, 0); + relative_fd = OPTS_GET(opts, relative_fd, 0); + + /* validate we don't have unexpected combinations of non-zero fields */ + if (!ifindex) { + pr_warn("prog '%s': target netdevice ifindex cannot be zero\n", + prog->name); + return libbpf_err_ptr(-EINVAL); + } + if (relative_fd && relative_id) { + pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n", + prog->name); + return libbpf_err_ptr(-EINVAL); + } + + link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0); + link_create_opts.netkit.relative_fd = relative_fd; + link_create_opts.netkit.relative_id = relative_id; + link_create_opts.flags = OPTS_GET(opts, flags, 0); + + return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts); +} + struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog, int target_fd, const char *attach_func_name) diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 475378438545..6cd9c501624f 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -800,6 +800,21 @@ LIBBPF_API struct bpf_link * bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex, const struct bpf_tcx_opts *opts); +struct bpf_netkit_opts { + /* size of this struct, for forward/backward compatibility */ + size_t sz; + __u32 flags; + __u32 relative_fd; + __u32 relative_id; + __u64 expected_revision; + size_t :0; +}; +#define bpf_netkit_opts__last_field expected_revision + +LIBBPF_API struct bpf_link * +bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex, + const struct bpf_netkit_opts *opts); + struct bpf_map; LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map); diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index cc973b678a39..b52dc28dc8af 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -398,6 +398,7 @@ LIBBPF_1.3.0 { bpf_object__unpin; bpf_prog_detach_opts; bpf_program__attach_netfilter; + bpf_program__attach_netkit; bpf_program__attach_tcx; bpf_program__attach_uprobe_multi; ring__avail_data_size; From 92a85e18ad4705c66ace55a19f4f8301ef0eb59f Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Oct 2023 23:49:01 +0200 Subject: [PATCH 42/51] bpftool: Implement link show support for netkit Add support to dump netkit link information to bpftool in similar way as we have for XDP. The netkit link info only exposes the ifindex and the attach_type. Below shows an example link dump output, and a cgroup link is included for comparison, too: # bpftool link [...] 10: cgroup prog 2466 cgroup_id 1 attach_type cgroup_inet6_post_bind [...] 8: netkit prog 35 ifindex nk1(18) attach_type netkit_primary [...] Equivalent json output: # bpftool link --json [...] { "id": 10, "type": "cgroup", "prog_id": 2466, "cgroup_id": 1, "attach_type": "cgroup_inet6_post_bind" }, [...] { "id": 12, "type": "netkit", "prog_id": 61, "devname": "nk1", "ifindex": 21, "attach_type": "netkit_primary" } [...] 
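The same fields can also be consumed programmatically via the
bpf_link_info extension added earlier in this series (a sketch;
"link_fd" is assumed to be a file descriptor referring to a netkit
link):

        struct bpf_link_info info = {};
        __u32 len = sizeof(info);

        if (!bpf_link_get_info_by_fd(link_fd, &info, &len) &&
            info.type == BPF_LINK_TYPE_NETKIT)
                printf("ifindex %u attach_type %u\n",
                       info.netkit.ifindex, info.netkit.attach_type);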
Signed-off-by: Daniel Borkmann Reviewed-by: Quentin Monnet Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20231024214904.29825-5-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- tools/bpf/bpftool/link.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c index 4b1407b05056..a1528cde81ab 100644 --- a/tools/bpf/bpftool/link.c +++ b/tools/bpf/bpftool/link.c @@ -451,6 +451,10 @@ static int show_link_close_json(int fd, struct bpf_link_info *info) show_link_ifindex_json(info->tcx.ifindex, json_wtr); show_link_attach_type_json(info->tcx.attach_type, json_wtr); break; + case BPF_LINK_TYPE_NETKIT: + show_link_ifindex_json(info->netkit.ifindex, json_wtr); + show_link_attach_type_json(info->netkit.attach_type, json_wtr); + break; case BPF_LINK_TYPE_XDP: show_link_ifindex_json(info->xdp.ifindex, json_wtr); break; @@ -791,6 +795,11 @@ static int show_link_close_plain(int fd, struct bpf_link_info *info) show_link_ifindex_plain(info->tcx.ifindex); show_link_attach_type_plain(info->tcx.attach_type); break; + case BPF_LINK_TYPE_NETKIT: + printf("\n\t"); + show_link_ifindex_plain(info->netkit.ifindex); + show_link_attach_type_plain(info->netkit.attach_type); + break; case BPF_LINK_TYPE_XDP: printf("\n\t"); show_link_ifindex_plain(info->xdp.ifindex); From bec981a4add6dd6a63065e54e2b2e67c2af6c3fa Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Oct 2023 23:49:02 +0200 Subject: [PATCH 43/51] bpftool: Extend net dump with netkit progs Add support to dump BPF programs on netkit via bpftool. This includes both the BPF link and attach ops programs. Dumped information contain the attach location, function entry name, program ID and link ID when applicable. Example with tc BPF link: # ./bpftool net xdp: tc: nk1(22) netkit/peer tc1 prog_id 43 link_id 12 [...] Example with json dump: # ./bpftool net --json | jq [ { "xdp": [], "tc": [ { "devname": "nk1", "ifindex": 18, "kind": "netkit/primary", "name": "tc1", "prog_id": 29, "prog_flags": [], "link_id": 8, "link_flags": [] } ], "flow_dissector": [], "netfilter": [] } ] Signed-off-by: Daniel Borkmann Reviewed-by: Quentin Monnet Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20231024214904.29825-6-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- tools/bpf/bpftool/Documentation/bpftool-net.rst | 8 ++++---- tools/bpf/bpftool/net.c | 7 ++++++- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/tools/bpf/bpftool/Documentation/bpftool-net.rst b/tools/bpf/bpftool/Documentation/bpftool-net.rst index 5e2abd3de5ab..dd3f9469765b 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-net.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-net.rst @@ -37,7 +37,7 @@ DESCRIPTION **bpftool net { show | list }** [ **dev** *NAME* ] List bpf program attachments in the kernel networking subsystem. - Currently, device driver xdp attachments, tcx and old-style tc + Currently, device driver xdp attachments, tcx, netkit and old-style tc classifier/action attachments, flow_dissector as well as netfilter attachments are implemented, i.e., for program types **BPF_PROG_TYPE_XDP**, **BPF_PROG_TYPE_SCHED_CLS**, @@ -52,11 +52,11 @@ DESCRIPTION bpf programs, users should consult other tools, e.g., iproute2. The current output will start with all xdp program attachments, followed by - all tcx, then tc class/qdisc bpf program attachments, then flow_dissector - and finally netfilter programs. 
Both xdp programs and tcx/tc programs are + all tcx, netkit, then tc class/qdisc bpf program attachments, then flow_dissector + and finally netfilter programs. Both xdp programs and tcx/netkit/tc programs are ordered based on ifindex number. If multiple bpf programs attached to the same networking device through **tc**, the order will be first - all bpf programs attached to tcx, then tc classes, then all bpf programs + all bpf programs attached to tcx, netkit, then tc classes, then all bpf programs attached to non clsact qdiscs, and finally all bpf programs attached to root and clsact qdisc. diff --git a/tools/bpf/bpftool/net.c b/tools/bpf/bpftool/net.c index 66a8ce8ae012..968714b4c3d4 100644 --- a/tools/bpf/bpftool/net.c +++ b/tools/bpf/bpftool/net.c @@ -79,6 +79,8 @@ static const char * const attach_type_strings[] = { static const char * const attach_loc_strings[] = { [BPF_TCX_INGRESS] = "tcx/ingress", [BPF_TCX_EGRESS] = "tcx/egress", + [BPF_NETKIT_PRIMARY] = "netkit/primary", + [BPF_NETKIT_PEER] = "netkit/peer", }; const size_t net_attach_type_size = ARRAY_SIZE(attach_type_strings); @@ -506,6 +508,9 @@ static void show_dev_tc_bpf(struct ip_devname_ifindex *dev) { __show_dev_tc_bpf(dev, BPF_TCX_INGRESS); __show_dev_tc_bpf(dev, BPF_TCX_EGRESS); + + __show_dev_tc_bpf(dev, BPF_NETKIT_PRIMARY); + __show_dev_tc_bpf(dev, BPF_NETKIT_PEER); } static int show_dev_tc_bpf_classic(int sock, unsigned int nl_pid, @@ -926,7 +931,7 @@ static int do_help(int argc, char **argv) " ATTACH_TYPE := { xdp | xdpgeneric | xdpdrv | xdpoffload }\n" " " HELP_SPEC_OPTIONS " }\n" "\n" - "Note: Only xdp, tcx, tc, flow_dissector and netfilter attachments\n" + "Note: Only xdp, tcx, tc, netkit, flow_dissector and netfilter attachments\n" " are currently supported.\n" " For progs attached to cgroups, use \"bpftool cgroup\"\n" " to dump program attachments. For program types\n" From 51f1892b5289f0c09745d3bedb36493555d6d90c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Oct 2023 23:49:03 +0200 Subject: [PATCH 44/51] selftests/bpf: Add netlink helper library Add a minimal netlink helper library for the BPF selftests. This has been taken and cut down and cleaned up from iproute2. This covers basics such as netdevice creation which we need for BPF selftests / BPF CI given iproute2 package cannot cover it yet. Stanislav Fomichev suggested that this could be replaced in future by ynl tool generated C code once it has RTNL support to create devices. Once we get to this point the BPF CI would also need to add libmnl. If no further extensions are needed, a second option could be that we remove this code again once iproute2 package has support. Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20231024214904.29825-7-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/Makefile | 19 +- tools/testing/selftests/bpf/netlink_helpers.c | 358 ++++++++++++++++++ tools/testing/selftests/bpf/netlink_helpers.h | 46 +++ 3 files changed, 418 insertions(+), 5 deletions(-) create mode 100644 tools/testing/selftests/bpf/netlink_helpers.c create mode 100644 tools/testing/selftests/bpf/netlink_helpers.h diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 4225f975fce3..9c27b67bc7b1 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -585,11 +585,20 @@ endef # Define test_progs test runner. 
TRUNNER_TESTS_DIR := prog_tests TRUNNER_BPF_PROGS_DIR := progs -TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \ - network_helpers.c testing_helpers.c \ - btf_helpers.c flow_dissector_load.h \ - cap_helpers.c test_loader.c xsk.c disasm.c \ - json_writer.c unpriv_helpers.c \ +TRUNNER_EXTRA_SOURCES := test_progs.c \ + cgroup_helpers.c \ + trace_helpers.c \ + network_helpers.c \ + testing_helpers.c \ + btf_helpers.c \ + cap_helpers.c \ + unpriv_helpers.c \ + netlink_helpers.c \ + test_loader.c \ + xsk.c \ + disasm.c \ + json_writer.c \ + flow_dissector_load.h \ ip_check_defrag_frags.h TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \ $(OUTPUT)/liburandom_read.so \ diff --git a/tools/testing/selftests/bpf/netlink_helpers.c b/tools/testing/selftests/bpf/netlink_helpers.c new file mode 100644 index 000000000000..caf36eb1d032 --- /dev/null +++ b/tools/testing/selftests/bpf/netlink_helpers.c @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Taken & modified from iproute2's libnetlink.c + * Authors: Alexey Kuznetsov, + */ +#include +#include +#include +#include +#include +#include + +#include "netlink_helpers.h" + +static int rcvbuf = 1024 * 1024; + +void rtnl_close(struct rtnl_handle *rth) +{ + if (rth->fd >= 0) { + close(rth->fd); + rth->fd = -1; + } +} + +int rtnl_open_byproto(struct rtnl_handle *rth, unsigned int subscriptions, + int protocol) +{ + socklen_t addr_len; + int sndbuf = 32768; + int one = 1; + + memset(rth, 0, sizeof(*rth)); + rth->proto = protocol; + rth->fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, protocol); + if (rth->fd < 0) { + perror("Cannot open netlink socket"); + return -1; + } + if (setsockopt(rth->fd, SOL_SOCKET, SO_SNDBUF, + &sndbuf, sizeof(sndbuf)) < 0) { + perror("SO_SNDBUF"); + goto err; + } + if (setsockopt(rth->fd, SOL_SOCKET, SO_RCVBUF, + &rcvbuf, sizeof(rcvbuf)) < 0) { + perror("SO_RCVBUF"); + goto err; + } + + /* Older kernels may no support extended ACK reporting */ + setsockopt(rth->fd, SOL_NETLINK, NETLINK_EXT_ACK, + &one, sizeof(one)); + + memset(&rth->local, 0, sizeof(rth->local)); + rth->local.nl_family = AF_NETLINK; + rth->local.nl_groups = subscriptions; + + if (bind(rth->fd, (struct sockaddr *)&rth->local, + sizeof(rth->local)) < 0) { + perror("Cannot bind netlink socket"); + goto err; + } + addr_len = sizeof(rth->local); + if (getsockname(rth->fd, (struct sockaddr *)&rth->local, + &addr_len) < 0) { + perror("Cannot getsockname"); + goto err; + } + if (addr_len != sizeof(rth->local)) { + fprintf(stderr, "Wrong address length %d\n", addr_len); + goto err; + } + if (rth->local.nl_family != AF_NETLINK) { + fprintf(stderr, "Wrong address family %d\n", + rth->local.nl_family); + goto err; + } + rth->seq = time(NULL); + return 0; +err: + rtnl_close(rth); + return -1; +} + +int rtnl_open(struct rtnl_handle *rth, unsigned int subscriptions) +{ + return rtnl_open_byproto(rth, subscriptions, NETLINK_ROUTE); +} + +static int __rtnl_recvmsg(int fd, struct msghdr *msg, int flags) +{ + int len; + + do { + len = recvmsg(fd, msg, flags); + } while (len < 0 && (errno == EINTR || errno == EAGAIN)); + if (len < 0) { + fprintf(stderr, "netlink receive error %s (%d)\n", + strerror(errno), errno); + return -errno; + } + if (len == 0) { + fprintf(stderr, "EOF on netlink\n"); + return -ENODATA; + } + return len; +} + +static int rtnl_recvmsg(int fd, struct msghdr *msg, char **answer) +{ + struct iovec *iov = msg->msg_iov; + char *buf; + int len; + + iov->iov_base = NULL; + iov->iov_len = 0; + + len = 
__rtnl_recvmsg(fd, msg, MSG_PEEK | MSG_TRUNC); + if (len < 0) + return len; + if (len < 32768) + len = 32768; + buf = malloc(len); + if (!buf) { + fprintf(stderr, "malloc error: not enough buffer\n"); + return -ENOMEM; + } + iov->iov_base = buf; + iov->iov_len = len; + len = __rtnl_recvmsg(fd, msg, 0); + if (len < 0) { + free(buf); + return len; + } + if (answer) + *answer = buf; + else + free(buf); + return len; +} + +static void rtnl_talk_error(struct nlmsghdr *h, struct nlmsgerr *err, + nl_ext_ack_fn_t errfn) +{ + fprintf(stderr, "RTNETLINK answers: %s\n", + strerror(-err->error)); +} + +static int __rtnl_talk_iov(struct rtnl_handle *rtnl, struct iovec *iov, + size_t iovlen, struct nlmsghdr **answer, + bool show_rtnl_err, nl_ext_ack_fn_t errfn) +{ + struct sockaddr_nl nladdr = { .nl_family = AF_NETLINK }; + struct iovec riov; + struct msghdr msg = { + .msg_name = &nladdr, + .msg_namelen = sizeof(nladdr), + .msg_iov = iov, + .msg_iovlen = iovlen, + }; + unsigned int seq = 0; + struct nlmsghdr *h; + int i, status; + char *buf; + + for (i = 0; i < iovlen; i++) { + h = iov[i].iov_base; + h->nlmsg_seq = seq = ++rtnl->seq; + if (answer == NULL) + h->nlmsg_flags |= NLM_F_ACK; + } + status = sendmsg(rtnl->fd, &msg, 0); + if (status < 0) { + perror("Cannot talk to rtnetlink"); + return -1; + } + /* change msg to use the response iov */ + msg.msg_iov = &riov; + msg.msg_iovlen = 1; + i = 0; + while (1) { +next: + status = rtnl_recvmsg(rtnl->fd, &msg, &buf); + ++i; + if (status < 0) + return status; + if (msg.msg_namelen != sizeof(nladdr)) { + fprintf(stderr, + "Sender address length == %d!\n", + msg.msg_namelen); + exit(1); + } + for (h = (struct nlmsghdr *)buf; status >= sizeof(*h); ) { + int len = h->nlmsg_len; + int l = len - sizeof(*h); + + if (l < 0 || len > status) { + if (msg.msg_flags & MSG_TRUNC) { + fprintf(stderr, "Truncated message!\n"); + free(buf); + return -1; + } + fprintf(stderr, + "Malformed message: len=%d!\n", + len); + exit(1); + } + if (nladdr.nl_pid != 0 || + h->nlmsg_pid != rtnl->local.nl_pid || + h->nlmsg_seq > seq || h->nlmsg_seq < seq - iovlen) { + /* Don't forget to skip that message. 
*/ + status -= NLMSG_ALIGN(len); + h = (struct nlmsghdr *)((char *)h + NLMSG_ALIGN(len)); + continue; + } + if (h->nlmsg_type == NLMSG_ERROR) { + struct nlmsgerr *err = (struct nlmsgerr *)NLMSG_DATA(h); + int error = err->error; + + if (l < sizeof(struct nlmsgerr)) { + fprintf(stderr, "ERROR truncated\n"); + free(buf); + return -1; + } + if (error) { + errno = -error; + if (rtnl->proto != NETLINK_SOCK_DIAG && + show_rtnl_err) + rtnl_talk_error(h, err, errfn); + } + if (i < iovlen) { + free(buf); + goto next; + } + if (error) { + free(buf); + return -i; + } + if (answer) + *answer = (struct nlmsghdr *)buf; + else + free(buf); + return 0; + } + if (answer) { + *answer = (struct nlmsghdr *)buf; + return 0; + } + fprintf(stderr, "Unexpected reply!\n"); + status -= NLMSG_ALIGN(len); + h = (struct nlmsghdr *)((char *)h + NLMSG_ALIGN(len)); + } + free(buf); + if (msg.msg_flags & MSG_TRUNC) { + fprintf(stderr, "Message truncated!\n"); + continue; + } + if (status) { + fprintf(stderr, "Remnant of size %d!\n", status); + exit(1); + } + } +} + +static int __rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, + struct nlmsghdr **answer, bool show_rtnl_err, + nl_ext_ack_fn_t errfn) +{ + struct iovec iov = { + .iov_base = n, + .iov_len = n->nlmsg_len, + }; + + return __rtnl_talk_iov(rtnl, &iov, 1, answer, show_rtnl_err, errfn); +} + +int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, + struct nlmsghdr **answer) +{ + return __rtnl_talk(rtnl, n, answer, true, NULL); +} + +int addattr(struct nlmsghdr *n, int maxlen, int type) +{ + return addattr_l(n, maxlen, type, NULL, 0); +} + +int addattr8(struct nlmsghdr *n, int maxlen, int type, __u8 data) +{ + return addattr_l(n, maxlen, type, &data, sizeof(__u8)); +} + +int addattr16(struct nlmsghdr *n, int maxlen, int type, __u16 data) +{ + return addattr_l(n, maxlen, type, &data, sizeof(__u16)); +} + +int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data) +{ + return addattr_l(n, maxlen, type, &data, sizeof(__u32)); +} + +int addattr64(struct nlmsghdr *n, int maxlen, int type, __u64 data) +{ + return addattr_l(n, maxlen, type, &data, sizeof(__u64)); +} + +int addattrstrz(struct nlmsghdr *n, int maxlen, int type, const char *str) +{ + return addattr_l(n, maxlen, type, str, strlen(str)+1); +} + +int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data, + int alen) +{ + int len = RTA_LENGTH(alen); + struct rtattr *rta; + + if (NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len) > maxlen) { + fprintf(stderr, "%s: Message exceeded bound of %d\n", + __func__, maxlen); + return -1; + } + rta = NLMSG_TAIL(n); + rta->rta_type = type; + rta->rta_len = len; + if (alen) + memcpy(RTA_DATA(rta), data, alen); + n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + RTA_ALIGN(len); + return 0; +} + +int addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len) +{ + if (NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len) > maxlen) { + fprintf(stderr, "%s: Message exceeded bound of %d\n", + __func__, maxlen); + return -1; + } + + memcpy(NLMSG_TAIL(n), data, len); + memset((void *) NLMSG_TAIL(n) + len, 0, NLMSG_ALIGN(len) - len); + n->nlmsg_len = NLMSG_ALIGN(n->nlmsg_len) + NLMSG_ALIGN(len); + return 0; +} + +struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type) +{ + struct rtattr *nest = NLMSG_TAIL(n); + + addattr_l(n, maxlen, type, NULL, 0); + return nest; +} + +int addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest) +{ + nest->rta_len = (void *)NLMSG_TAIL(n) - (void *)nest; + return n->nlmsg_len; +} diff --git 
a/tools/testing/selftests/bpf/netlink_helpers.h b/tools/testing/selftests/bpf/netlink_helpers.h new file mode 100644 index 000000000000..68116818a47e --- /dev/null +++ b/tools/testing/selftests/bpf/netlink_helpers.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef NETLINK_HELPERS_H +#define NETLINK_HELPERS_H + +#include +#include +#include + +struct rtnl_handle { + int fd; + struct sockaddr_nl local; + struct sockaddr_nl peer; + __u32 seq; + __u32 dump; + int proto; + FILE *dump_fp; +#define RTNL_HANDLE_F_LISTEN_ALL_NSID 0x01 +#define RTNL_HANDLE_F_SUPPRESS_NLERR 0x02 +#define RTNL_HANDLE_F_STRICT_CHK 0x04 + int flags; +}; + +#define NLMSG_TAIL(nmsg) \ + ((struct rtattr *) (((void *) (nmsg)) + NLMSG_ALIGN((nmsg)->nlmsg_len))) + +typedef int (*nl_ext_ack_fn_t)(const char *errmsg, uint32_t off, + const struct nlmsghdr *inner_nlh); + +int rtnl_open(struct rtnl_handle *rth, unsigned int subscriptions) + __attribute__((warn_unused_result)); +void rtnl_close(struct rtnl_handle *rth); +int rtnl_talk(struct rtnl_handle *rtnl, struct nlmsghdr *n, + struct nlmsghdr **answer) + __attribute__((warn_unused_result)); + +int addattr(struct nlmsghdr *n, int maxlen, int type); +int addattr8(struct nlmsghdr *n, int maxlen, int type, __u8 data); +int addattr16(struct nlmsghdr *n, int maxlen, int type, __u16 data); +int addattr32(struct nlmsghdr *n, int maxlen, int type, __u32 data); +int addattr64(struct nlmsghdr *n, int maxlen, int type, __u64 data); +int addattrstrz(struct nlmsghdr *n, int maxlen, int type, const char *data); +int addattr_l(struct nlmsghdr *n, int maxlen, int type, const void *data, int alen); +int addraw_l(struct nlmsghdr *n, int maxlen, const void *data, int len); +struct rtattr *addattr_nest(struct nlmsghdr *n, int maxlen, int type); +int addattr_nest_end(struct nlmsghdr *n, struct rtattr *nest); +#endif /* NETLINK_HELPERS_H */ From ace15f91e569172dac71ae0aeb3a2e76d1ce1b17 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Tue, 24 Oct 2023 23:49:04 +0200 Subject: [PATCH 45/51] selftests/bpf: Add selftests for netkit Add a bigger batch of test coverage to assert correct operation of netkit devices and their BPF program management: # ./test_progs -t tc_netkit [...] [ 1.166267] bpf_testmod: loading out-of-tree module taints kernel. [ 1.166831] bpf_testmod: module verification failed: signature and/or required key missing - tainting kernel [ 1.270957] tsc: Refined TSC clocksource calibration: 3407.988 MHz [ 1.272579] clocksource: tsc: mask: 0xffffffffffffffff max_cycles: 0x311fc932722, max_idle_ns: 440795381586 ns [ 1.275336] clocksource: Switched to clocksource tsc #257 tc_netkit_basic:OK #258 tc_netkit_device:OK #259 tc_netkit_multi_links:OK #260 tc_netkit_multi_opts:OK #261 tc_netkit_neigh_links:OK Summary: 5/0 PASSED, 0 SKIPPED, 0 FAILED [...] 
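These tests drive the netlink helper library added in the previous patch. As a usage illustration, here is a minimal sketch that creates a plain "dummy" device via RTM_NEWLINK using the attribute nesting helpers, the same pattern the create_netkit() helper in the diff below follows; the device name is an assumption:

#include <string.h>
#include <sys/socket.h>
#include <linux/rtnetlink.h>

#include "netlink_helpers.h"

struct iplink_req {
	struct nlmsghdr n;
	struct ifinfomsg i;
	char buf[1024];
};

/* Sketch only: netlink equivalent of "ip link add <name> type dummy". */
static int create_dummy(const char *name)
{
	struct rtnl_handle rth = { .fd = -1 };
	struct iplink_req req = {};
	struct rtattr *linkinfo;
	int err;

	if (rtnl_open(&rth, 0))
		return -1;

	req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
	req.n.nlmsg_type = RTM_NEWLINK;
	req.i.ifi_family = AF_UNSPEC;

	addattr_l(&req.n, sizeof(req), IFLA_IFNAME, name, strlen(name));
	linkinfo = addattr_nest(&req.n, sizeof(req), IFLA_LINKINFO);
	addattr_l(&req.n, sizeof(req), IFLA_INFO_KIND, "dummy", strlen("dummy"));
	addattr_nest_end(&req.n, linkinfo);

	/* Passing a NULL answer makes rtnl_talk() request and consume an ACK. */
	err = rtnl_talk(&rth, &req.n, NULL);
	rtnl_close(&rth);
	return err;
}
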
Signed-off-by: Daniel Borkmann Acked-by: Martin KaFai Lau Link: https://lore.kernel.org/r/20231024214904.29825-8-daniel@iogearbox.net Signed-off-by: Martin KaFai Lau --- tools/testing/selftests/bpf/config | 1 + .../selftests/bpf/prog_tests/tc_helpers.h | 4 + .../selftests/bpf/prog_tests/tc_netkit.c | 687 ++++++++++++++++++ .../selftests/bpf/progs/test_tc_link.c | 13 + 4 files changed, 705 insertions(+) create mode 100644 tools/testing/selftests/bpf/prog_tests/tc_netkit.c diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 02dd4409200e..3ec5927ec3e5 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -71,6 +71,7 @@ CONFIG_NETFILTER_SYNPROXY=y CONFIG_NETFILTER_XT_CONNMARK=y CONFIG_NETFILTER_XT_MATCH_STATE=y CONFIG_NETFILTER_XT_TARGET_CT=y +CONFIG_NETKIT=y CONFIG_NF_CONNTRACK=y CONFIG_NF_CONNTRACK_MARK=y CONFIG_NF_DEFRAG_IPV4=y diff --git a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h index 67f985f7d215..924d0e25320c 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_helpers.h +++ b/tools/testing/selftests/bpf/prog_tests/tc_helpers.h @@ -4,6 +4,10 @@ #define TC_HELPERS #include +#ifndef loopback +# define loopback 1 +#endif + static inline __u32 id_from_prog_fd(int fd) { struct bpf_prog_info prog_info = {}; diff --git a/tools/testing/selftests/bpf/prog_tests/tc_netkit.c b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c new file mode 100644 index 000000000000..15ee7b2fc410 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/tc_netkit.c @@ -0,0 +1,687 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Isovalent */ +#include +#include +#include + +#define netkit_peer "nk0" +#define netkit_name "nk1" + +#define ping_addr_neigh 0x0a000002 /* 10.0.0.2 */ +#define ping_addr_noneigh 0x0a000003 /* 10.0.0.3 */ + +#include "test_tc_link.skel.h" +#include "netlink_helpers.h" +#include "tc_helpers.h" + +#define ICMP_ECHO 8 + +struct icmphdr { + __u8 type; + __u8 code; + __sum16 checksum; + struct { + __be16 id; + __be16 sequence; + } echo; +}; + +struct iplink_req { + struct nlmsghdr n; + struct ifinfomsg i; + char buf[1024]; +}; + +static int create_netkit(int mode, int policy, int peer_policy, int *ifindex, + bool same_netns) +{ + struct rtnl_handle rth = { .fd = -1 }; + struct iplink_req req = {}; + struct rtattr *linkinfo, *data; + const char *type = "netkit"; + int err; + + err = rtnl_open(&rth, 0); + if (!ASSERT_OK(err, "open_rtnetlink")) + return err; + + memset(&req, 0, sizeof(req)); + req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)); + req.n.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL; + req.n.nlmsg_type = RTM_NEWLINK; + req.i.ifi_family = AF_UNSPEC; + + addattr_l(&req.n, sizeof(req), IFLA_IFNAME, netkit_name, + strlen(netkit_name)); + linkinfo = addattr_nest(&req.n, sizeof(req), IFLA_LINKINFO); + addattr_l(&req.n, sizeof(req), IFLA_INFO_KIND, type, strlen(type)); + data = addattr_nest(&req.n, sizeof(req), IFLA_INFO_DATA); + addattr32(&req.n, sizeof(req), IFLA_NETKIT_POLICY, policy); + addattr32(&req.n, sizeof(req), IFLA_NETKIT_PEER_POLICY, peer_policy); + addattr32(&req.n, sizeof(req), IFLA_NETKIT_MODE, mode); + addattr_nest_end(&req.n, data); + addattr_nest_end(&req.n, linkinfo); + + err = rtnl_talk(&rth, &req.n, NULL); + ASSERT_OK(err, "talk_rtnetlink"); + rtnl_close(&rth); + *ifindex = if_nametoindex(netkit_name); + + ASSERT_GT(*ifindex, 0, "retrieve_ifindex"); + ASSERT_OK(system("ip netns add foo"), "create 
netns"); + ASSERT_OK(system("ip link set dev " netkit_name " up"), + "up primary"); + ASSERT_OK(system("ip addr add dev " netkit_name " 10.0.0.1/24"), + "addr primary"); + if (same_netns) { + ASSERT_OK(system("ip link set dev " netkit_peer " up"), + "up peer"); + ASSERT_OK(system("ip addr add dev " netkit_peer " 10.0.0.2/24"), + "addr peer"); + } else { + ASSERT_OK(system("ip link set " netkit_peer " netns foo"), + "move peer"); + ASSERT_OK(system("ip netns exec foo ip link set dev " + netkit_peer " up"), "up peer"); + ASSERT_OK(system("ip netns exec foo ip addr add dev " + netkit_peer " 10.0.0.2/24"), "addr peer"); + } + return err; +} + +static void destroy_netkit(void) +{ + ASSERT_OK(system("ip link del dev " netkit_name), "del primary"); + ASSERT_OK(system("ip netns del foo"), "delete netns"); + ASSERT_EQ(if_nametoindex(netkit_name), 0, netkit_name "_ifindex"); +} + +static int __send_icmp(__u32 dest) +{ + struct sockaddr_in addr; + struct icmphdr icmp; + int sock, ret; + + ret = write_sysctl("/proc/sys/net/ipv4/ping_group_range", "0 0"); + if (!ASSERT_OK(ret, "write_sysctl(net.ipv4.ping_group_range)")) + return ret; + + sock = socket(AF_INET, SOCK_DGRAM, IPPROTO_ICMP); + if (!ASSERT_GE(sock, 0, "icmp_socket")) + return -errno; + + ret = setsockopt(sock, SOL_SOCKET, SO_BINDTODEVICE, + netkit_name, strlen(netkit_name) + 1); + if (!ASSERT_OK(ret, "setsockopt(SO_BINDTODEVICE)")) + goto out; + + memset(&addr, 0, sizeof(addr)); + addr.sin_family = AF_INET; + addr.sin_addr.s_addr = htonl(dest); + + memset(&icmp, 0, sizeof(icmp)); + icmp.type = ICMP_ECHO; + icmp.echo.id = 1234; + icmp.echo.sequence = 1; + + ret = sendto(sock, &icmp, sizeof(icmp), 0, + (struct sockaddr *)&addr, sizeof(addr)); + if (!ASSERT_GE(ret, 0, "icmp_sendto")) + ret = -errno; + else + ret = 0; +out: + close(sock); + return ret; +} + +static int send_icmp(void) +{ + return __send_icmp(ping_addr_neigh); +} + +void serial_test_tc_netkit_basic(void) +{ + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_netkit_opts, optl); + __u32 prog_ids[2], link_ids[2]; + __u32 pid1, pid2, lid1, lid2; + struct test_tc_link *skel; + struct bpf_link *link; + int err, ifindex; + + err = create_netkit(NETKIT_L2, NETKIT_PASS, NETKIT_PASS, + &ifindex, false); + if (err) + return; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, + BPF_NETKIT_PRIMARY), 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, + BPF_NETKIT_PEER), 0, "tc2_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0); + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0); + + ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + + link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1); + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0); + + optq.prog_ids = prog_ids; + optq.link_ids = link_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, 
sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(ifindex, BPF_NETKIT_PRIMARY, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 2, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + + tc_skel_reset_all_seen(skel); + ASSERT_EQ(send_icmp(), 0, "icmp_pkt"); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + + link = bpf_program__attach_netkit(skel->progs.tc2, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + ASSERT_NEQ(lid1, lid2, "link_ids_1_2"); + + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1); + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 1); + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(ifindex, BPF_NETKIT_PEER, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 2, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + + tc_skel_reset_all_seen(skel); + ASSERT_EQ(send_icmp(), 0, "icmp_pkt"); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +cleanup: + test_tc_link__destroy(skel); + + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0); + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0); + destroy_netkit(); +} + +static void serial_test_tc_netkit_multi_links_target(int mode, int target) +{ + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_netkit_opts, optl); + __u32 prog_ids[3], link_ids[3]; + __u32 pid1, pid2, lid1, lid2; + struct test_tc_link *skel; + struct bpf_link *link; + int err, ifindex; + + err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, + &ifindex, false); + if (err) + return; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, + target), 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, + target), 0, "tc2_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + + assert_mprog_count_ifindex(ifindex, target, 0); + + ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_eth, false, "seen_eth"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + + link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + + assert_mprog_count_ifindex(ifindex, target, 1); + + optq.prog_ids = prog_ids; + optq.link_ids = link_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = 
bpf_prog_query_opts(ifindex, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 2, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + + tc_skel_reset_all_seen(skel); + ASSERT_EQ(send_icmp(), 0, "icmp_pkt"); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_eth, true, "seen_eth"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + + LIBBPF_OPTS_RESET(optl, + .flags = BPF_F_BEFORE, + .relative_fd = bpf_program__fd(skel->progs.tc1), + ); + + link = bpf_program__attach_netkit(skel->progs.tc2, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc2 = link; + + lid2 = id_from_link_fd(bpf_link__fd(skel->links.tc2)); + ASSERT_NEQ(lid1, lid2, "link_ids_1_2"); + + assert_mprog_count_ifindex(ifindex, target, 2); + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(ifindex, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid2, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], lid1, "link_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + ASSERT_EQ(optq.link_ids[2], 0, "link_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_EQ(send_icmp(), 0, "icmp_pkt"); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_eth, true, "seen_eth"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); +cleanup: + test_tc_link__destroy(skel); + + assert_mprog_count_ifindex(ifindex, target, 0); + destroy_netkit(); +} + +void serial_test_tc_netkit_multi_links(void) +{ + serial_test_tc_netkit_multi_links_target(NETKIT_L2, BPF_NETKIT_PRIMARY); + serial_test_tc_netkit_multi_links_target(NETKIT_L3, BPF_NETKIT_PRIMARY); + serial_test_tc_netkit_multi_links_target(NETKIT_L2, BPF_NETKIT_PEER); + serial_test_tc_netkit_multi_links_target(NETKIT_L3, BPF_NETKIT_PEER); +} + +static void serial_test_tc_netkit_multi_opts_target(int mode, int target) +{ + LIBBPF_OPTS(bpf_prog_attach_opts, opta); + LIBBPF_OPTS(bpf_prog_detach_opts, optd); + LIBBPF_OPTS(bpf_prog_query_opts, optq); + __u32 pid1, pid2, fd1, fd2; + __u32 prog_ids[3]; + struct test_tc_link *skel; + int err, ifindex; + + err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, + &ifindex, false); + if (err) + return; + + skel = test_tc_link__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_load")) + goto cleanup; + + fd1 = bpf_program__fd(skel->progs.tc1); + fd2 = bpf_program__fd(skel->progs.tc2); + + pid1 = id_from_prog_fd(fd1); + pid2 = id_from_prog_fd(fd2); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + + assert_mprog_count_ifindex(ifindex, target, 0); + + ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_eth, false, "seen_eth"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + + err = bpf_prog_attach_opts(fd1, ifindex, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup; + + assert_mprog_count_ifindex(ifindex, target, 1); + + optq.prog_ids = prog_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = 
bpf_prog_query_opts(ifindex, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_fd1; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 2, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + + tc_skel_reset_all_seen(skel); + ASSERT_EQ(send_icmp(), 0, "icmp_pkt"); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_eth, true, "seen_eth"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + + LIBBPF_OPTS_RESET(opta, + .flags = BPF_F_BEFORE, + .relative_fd = fd1, + ); + + err = bpf_prog_attach_opts(fd2, ifindex, target, &opta); + if (!ASSERT_EQ(err, 0, "prog_attach")) + goto cleanup_fd1; + + assert_mprog_count_ifindex(ifindex, target, 2); + + memset(prog_ids, 0, sizeof(prog_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(ifindex, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup_fd2; + + ASSERT_EQ(optq.count, 2, "count"); + ASSERT_EQ(optq.revision, 3, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid2, "prog_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], pid1, "prog_ids[1]"); + ASSERT_EQ(optq.prog_ids[2], 0, "prog_ids[2]"); + + tc_skel_reset_all_seen(skel); + ASSERT_EQ(send_icmp(), 0, "icmp_pkt"); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_eth, true, "seen_eth"); + ASSERT_EQ(skel->bss->seen_tc2, true, "seen_tc2"); + +cleanup_fd2: + err = bpf_prog_detach_opts(fd2, ifindex, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count_ifindex(ifindex, target, 1); +cleanup_fd1: + err = bpf_prog_detach_opts(fd1, ifindex, target, &optd); + ASSERT_OK(err, "prog_detach"); + assert_mprog_count_ifindex(ifindex, target, 0); +cleanup: + test_tc_link__destroy(skel); + + assert_mprog_count_ifindex(ifindex, target, 0); + destroy_netkit(); +} + +void serial_test_tc_netkit_multi_opts(void) +{ + serial_test_tc_netkit_multi_opts_target(NETKIT_L2, BPF_NETKIT_PRIMARY); + serial_test_tc_netkit_multi_opts_target(NETKIT_L3, BPF_NETKIT_PRIMARY); + serial_test_tc_netkit_multi_opts_target(NETKIT_L2, BPF_NETKIT_PEER); + serial_test_tc_netkit_multi_opts_target(NETKIT_L3, BPF_NETKIT_PEER); +} + +void serial_test_tc_netkit_device(void) +{ + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_netkit_opts, optl); + __u32 prog_ids[2], link_ids[2]; + __u32 pid1, pid2, lid1; + struct test_tc_link *skel; + struct bpf_link *link; + int err, ifindex, ifindex2; + + err = create_netkit(NETKIT_L3, NETKIT_PASS, NETKIT_PASS, + &ifindex, true); + if (err) + return; + + ifindex2 = if_nametoindex(netkit_peer); + ASSERT_NEQ(ifindex, ifindex2, "ifindex_1_2"); + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, + BPF_NETKIT_PRIMARY), 0, "tc1_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc2, + BPF_NETKIT_PEER), 0, "tc2_attach_type"); + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc3, + BPF_NETKIT_PRIMARY), 0, "tc3_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + pid2 = id_from_prog_fd(bpf_program__fd(skel->progs.tc2)); + + ASSERT_NEQ(pid1, pid2, "prog_ids_1_2"); + + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0); + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0); + + ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); + 
ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + + link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1); + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0); + + optq.prog_ids = prog_ids; + optq.link_ids = link_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(ifindex, BPF_NETKIT_PRIMARY, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 2, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + + tc_skel_reset_all_seen(skel); + ASSERT_EQ(send_icmp(), 0, "icmp_pkt"); + + ASSERT_EQ(skel->bss->seen_tc1, true, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_tc2, false, "seen_tc2"); + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = bpf_prog_query_opts(ifindex2, BPF_NETKIT_PRIMARY, &optq); + ASSERT_EQ(err, -EACCES, "prog_query_should_fail"); + + err = bpf_prog_query_opts(ifindex2, BPF_NETKIT_PEER, &optq); + ASSERT_EQ(err, -EACCES, "prog_query_should_fail"); + + link = bpf_program__attach_netkit(skel->progs.tc2, ifindex2, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + link = bpf_program__attach_netkit(skel->progs.tc3, ifindex2, &optl); + if (!ASSERT_ERR_PTR(link, "link_attach_should_fail")) { + bpf_link__destroy(link); + goto cleanup; + } + + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 1); + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0); +cleanup: + test_tc_link__destroy(skel); + + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PRIMARY, 0); + assert_mprog_count_ifindex(ifindex, BPF_NETKIT_PEER, 0); + destroy_netkit(); +} + +static void serial_test_tc_netkit_neigh_links_target(int mode, int target) +{ + LIBBPF_OPTS(bpf_prog_query_opts, optq); + LIBBPF_OPTS(bpf_netkit_opts, optl); + __u32 prog_ids[2], link_ids[2]; + __u32 pid1, lid1; + struct test_tc_link *skel; + struct bpf_link *link; + int err, ifindex; + + err = create_netkit(mode, NETKIT_PASS, NETKIT_PASS, + &ifindex, false); + if (err) + return; + + skel = test_tc_link__open(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + goto cleanup; + + ASSERT_EQ(bpf_program__set_expected_attach_type(skel->progs.tc1, + BPF_NETKIT_PRIMARY), 0, "tc1_attach_type"); + + err = test_tc_link__load(skel); + if (!ASSERT_OK(err, "skel_load")) + goto cleanup; + + pid1 = id_from_prog_fd(bpf_program__fd(skel->progs.tc1)); + + assert_mprog_count_ifindex(ifindex, target, 0); + + ASSERT_EQ(skel->bss->seen_tc1, false, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_eth, false, "seen_eth"); + + link = bpf_program__attach_netkit(skel->progs.tc1, ifindex, &optl); + if (!ASSERT_OK_PTR(link, "link_attach")) + goto cleanup; + + skel->links.tc1 = link; + + lid1 = id_from_link_fd(bpf_link__fd(skel->links.tc1)); + + assert_mprog_count_ifindex(ifindex, target, 1); + + optq.prog_ids = prog_ids; + optq.link_ids = link_ids; + + memset(prog_ids, 0, sizeof(prog_ids)); + memset(link_ids, 0, sizeof(link_ids)); + optq.count = ARRAY_SIZE(prog_ids); + + err = 
bpf_prog_query_opts(ifindex, target, &optq); + if (!ASSERT_OK(err, "prog_query")) + goto cleanup; + + ASSERT_EQ(optq.count, 1, "count"); + ASSERT_EQ(optq.revision, 2, "revision"); + ASSERT_EQ(optq.prog_ids[0], pid1, "prog_ids[0]"); + ASSERT_EQ(optq.link_ids[0], lid1, "link_ids[0]"); + ASSERT_EQ(optq.prog_ids[1], 0, "prog_ids[1]"); + ASSERT_EQ(optq.link_ids[1], 0, "link_ids[1]"); + + tc_skel_reset_all_seen(skel); + ASSERT_EQ(__send_icmp(ping_addr_noneigh), 0, "icmp_pkt"); + + ASSERT_EQ(skel->bss->seen_tc1, true /* L2: ARP */, "seen_tc1"); + ASSERT_EQ(skel->bss->seen_eth, mode == NETKIT_L3, "seen_eth"); +cleanup: + test_tc_link__destroy(skel); + + assert_mprog_count_ifindex(ifindex, target, 0); + destroy_netkit(); +} + +void serial_test_tc_netkit_neigh_links(void) +{ + serial_test_tc_netkit_neigh_links_target(NETKIT_L2, BPF_NETKIT_PRIMARY); + serial_test_tc_netkit_neigh_links_target(NETKIT_L3, BPF_NETKIT_PRIMARY); +} diff --git a/tools/testing/selftests/bpf/progs/test_tc_link.c b/tools/testing/selftests/bpf/progs/test_tc_link.c index 30e7124c49a1..992400acb957 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_link.c +++ b/tools/testing/selftests/bpf/progs/test_tc_link.c @@ -1,7 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2023 Isovalent */ #include + #include +#include + +#include #include char LICENSE[] SEC("license") = "GPL"; @@ -12,10 +16,19 @@ bool seen_tc3; bool seen_tc4; bool seen_tc5; bool seen_tc6; +bool seen_eth; SEC("tc/ingress") int tc1(struct __sk_buff *skb) { + struct ethhdr eth = {}; + + if (skb->protocol != __bpf_constant_htons(ETH_P_IP)) + goto out; + if (bpf_skb_load_bytes(skb, 0, ð, sizeof(eth))) + goto out; + seen_eth = eth.h_proto == bpf_htons(ETH_P_IP); +out: seen_tc1 = true; return TCX_NEXT; } From c421c12586b3f00fb96b5c9af15c9a051a9090b1 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Sat, 21 Oct 2023 09:49:59 +0800 Subject: [PATCH 46/51] bpf: Add more WARN_ON_ONCE checks for mismatched alloc and free There are two possible mismatched alloc and free cases in BPF memory allocator: 1) allocate from cache X but free by cache Y with a different unit_size 2) allocate from per-cpu cache but free by kmalloc cache or vice versa So add more WARN_ON_ONCE checks in free_bulk() and __free_by_rcu() to spot these mismatched alloc and free early. Signed-off-by: Hou Tao Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20231021014959.3563841-1-houtao@huaweicloud.com --- kernel/bpf/memalloc.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c index 5308e386380a..63b909d277d4 100644 --- a/kernel/bpf/memalloc.c +++ b/kernel/bpf/memalloc.c @@ -340,6 +340,7 @@ static void free_bulk(struct bpf_mem_cache *c) int cnt; WARN_ON_ONCE(tgt->unit_size != c->unit_size); + WARN_ON_ONCE(tgt->percpu_size != c->percpu_size); do { inc_active(c, &flags); @@ -365,6 +366,9 @@ static void __free_by_rcu(struct rcu_head *head) struct bpf_mem_cache *tgt = c->tgt; struct llist_node *llnode; + WARN_ON_ONCE(tgt->unit_size != c->unit_size); + WARN_ON_ONCE(tgt->percpu_size != c->percpu_size); + llnode = llist_del_all(&c->waiting_for_gp); if (!llnode) goto out; From 870f09f1ba3014e2c157b14299c172b4bb716638 Mon Sep 17 00:00:00 2001 From: Viktor Malik Date: Wed, 25 Oct 2023 08:19:12 +0200 Subject: [PATCH 47/51] samples/bpf: Allow building with custom CFLAGS/LDFLAGS Currently, it is not possible to specify custom flags when building samples/bpf. 
The flags are defined in TPROGS_CFLAGS/TPROGS_LDFLAGS variables, however, when trying to override those from the make command, compilation fails. For example, when trying to build with PIE: $ make -C samples/bpf TPROGS_CFLAGS="-fpie" TPROGS_LDFLAGS="-pie" This is because samples/bpf/Makefile updates these variables, especially appends include paths to TPROGS_CFLAGS and these updates are overridden by setting the variables from the make command. This patch introduces variables TPROGS_USER_CFLAGS/TPROGS_USER_LDFLAGS for this purpose, which can be set from the make command and their values are propagated to TPROGS_CFLAGS/TPROGS_LDFLAGS. Signed-off-by: Viktor Malik Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/2d81100b830a71f0e72329cc7781edaefab75f62.1698213811.git.vmalik@redhat.com --- samples/bpf/Makefile | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 90af76fa9dd8..5a9805edec93 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -150,6 +150,9 @@ always-y += ibumad_kern.o always-y += hbm_out_kern.o always-y += hbm_edt_kern.o +TPROGS_CFLAGS = $(TPROGS_USER_CFLAGS) +TPROGS_LDFLAGS = $(TPROGS_USER_LDFLAGS) + ifeq ($(ARCH), arm) # Strip all except -D__LINUX_ARM_ARCH__ option needed to handle linux # headers when arm instruction set identification is requested. @@ -316,7 +319,7 @@ XDP_SAMPLE_CFLAGS += -Wall -O2 \ -I$(LIBBPF_INCLUDE) \ -I$(src)/../../tools/testing/selftests/bpf -$(obj)/$(XDP_SAMPLE): TPROGS_CFLAGS = $(XDP_SAMPLE_CFLAGS) +$(obj)/$(XDP_SAMPLE): TPROGS_CFLAGS = $(XDP_SAMPLE_CFLAGS) $(TPROGS_USER_CFLAGS) $(obj)/$(XDP_SAMPLE): $(src)/xdp_sample_user.h $(src)/xdp_sample_shared.h # Override includes for trace_helpers.o because __must_check won't be defined # in our include path. From f56bcfadf7d6d56b099726df4fc262b76486b0e0 Mon Sep 17 00:00:00 2001 From: Viktor Malik Date: Wed, 25 Oct 2023 08:19:13 +0200 Subject: [PATCH 48/51] samples/bpf: Fix passing LDFLAGS to libbpf samples/bpf/Makefile passes LDFLAGS=$(TPROGS_LDFLAGS) to libbpf build without surrounding quotes, which may cause compilation errors when passing custom TPROGS_USER_LDFLAGS. For example: $ make -C samples/bpf/ TPROGS_USER_LDFLAGS="-Wl,--as-needed -specs=/usr/lib/gcc/x86_64-redhat-linux/13/libsanitizer.spec" make: Entering directory './samples/bpf' make -C ../../ M=./samples/bpf BPF_SAMPLES_PATH=./samples/bpf make[1]: Entering directory '.' make -C ./samples/bpf/../../tools/lib/bpf RM='rm -rf' EXTRA_CFLAGS="-Wall -O2 -Wmissing-prototypes -Wstrict-prototypes -I./usr/include -I./tools/testing/selftests/bpf/ -I./samples/bpf/libbpf/include -I./tools/include -I./tools/perf -I./tools/lib -DHAVE_ATTR_TEST=0" \ LDFLAGS=-Wl,--as-needed -specs=/usr/lib/gcc/x86_64-redhat-linux/13/libsanitizer.spec srctree=./samples/bpf/../../ \ O= OUTPUT=./samples/bpf/libbpf/ DESTDIR=./samples/bpf/libbpf prefix= \ ./samples/bpf/libbpf/libbpf.a install_headers make: invalid option -- 'c' make: invalid option -- '=' make: invalid option -- '/' make: invalid option -- 'u' make: invalid option -- '/' [...] Fix the error by properly quoting $(TPROGS_LDFLAGS). 
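To see why the quoting matters: make word-splits an unquoted variable reference, so in the recursive invocation everything after the first space of TPROGS_LDFLAGS reaches the sub-make as stray command-line words, producing the "invalid option" noise above. A stripped-down sketch in make, with a placeholder subdirectory and spec path:

# Sketch of the failure mode; "sub" and the spec path are placeholders.
USER_LDFLAGS := -Wl,--as-needed -specs=/usr/lib/gcc/libsanitizer.spec

broken:
	# Expands to: make -C sub LDFLAGS=-Wl,--as-needed -specs=/usr/...
	# "-specs=..." becomes a separate word that make parses as options.
	$(MAKE) -C sub LDFLAGS=$(USER_LDFLAGS)

fixed:
	# Quoted, the whole value stays one LDFLAGS=... assignment.
	$(MAKE) -C sub LDFLAGS="$(USER_LDFLAGS)"
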
Suggested-by: Donald Zickus Signed-off-by: Viktor Malik Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/c690de6671cc6c983d32a566d33fd7eabd18b526.1698213811.git.vmalik@redhat.com --- samples/bpf/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 5a9805edec93..378b9f3e9321 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -254,7 +254,7 @@ clean: $(LIBBPF): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OUTPUT) # Fix up variables inherited from Kbuild that tools/ build system won't like $(MAKE) -C $(LIBBPF_SRC) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" \ - LDFLAGS=$(TPROGS_LDFLAGS) srctree=$(BPF_SAMPLES_PATH)/../../ \ + LDFLAGS="$(TPROGS_LDFLAGS)" srctree=$(BPF_SAMPLES_PATH)/../../ \ O= OUTPUT=$(LIBBPF_OUTPUT)/ DESTDIR=$(LIBBPF_DESTDIR) prefix= \ $@ install_headers From 37db10bc247d5d0b448babd7ff386f092246e732 Mon Sep 17 00:00:00 2001 From: Viktor Malik Date: Wed, 25 Oct 2023 08:19:14 +0200 Subject: [PATCH 49/51] samples/bpf: Allow building with custom bpftool samples/bpf build its own bpftool boostrap to generate vmlinux.h as well as some BPF objects. This is a redundant step if bpftool has been already built, so update samples/bpf/Makefile such that it accepts a path to bpftool passed via the BPFTOOL variable. The approach is practically the same as tools/testing/selftests/bpf/Makefile uses. Signed-off-by: Viktor Malik Signed-off-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/bd746954ac271b02468d8d951ff9f11e655d485b.1698213811.git.vmalik@redhat.com --- samples/bpf/Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 378b9f3e9321..933f6c3fe6b0 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile @@ -260,8 +260,9 @@ $(LIBBPF): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(LIBBPF_OU BPFTOOLDIR := $(TOOLS_PATH)/bpf/bpftool BPFTOOL_OUTPUT := $(abspath $(BPF_SAMPLES_PATH))/bpftool -BPFTOOL := $(BPFTOOL_OUTPUT)/bootstrap/bpftool -$(BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) | $(BPFTOOL_OUTPUT) +DEFAULT_BPFTOOL := $(BPFTOOL_OUTPUT)/bootstrap/bpftool +BPFTOOL ?= $(DEFAULT_BPFTOOL) +$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) | $(BPFTOOL_OUTPUT) $(MAKE) -C $(BPFTOOLDIR) srctree=$(BPF_SAMPLES_PATH)/../../ \ OUTPUT=$(BPFTOOL_OUTPUT)/ bootstrap From 399f6185a1c02f39bcadb8749bc2d9d48685816f Mon Sep 17 00:00:00 2001 From: Yafang Shao Date: Wed, 25 Oct 2023 03:11:44 +0000 Subject: [PATCH 50/51] selftests/bpf: Fix selftests broken by mitigations=off When we configure the kernel command line with 'mitigations=off' and set the sysctl knob 'kernel.unprivileged_bpf_disabled' to 0, the commit bc5bc309db45 ("bpf: Inherit system settings for CPU security mitigations") causes issues in the execution of `test_progs -t verifier`. This is because 'mitigations=off' bypasses Spectre v1 and Spectre v4 protections. Currently, when a program requests to run in unprivileged mode (kernel.unprivileged_bpf_disabled = 0), the BPF verifier may prevent it from running due to the following conditions not being enabled: - bypass_spec_v1 - bypass_spec_v4 - allow_ptr_leaks - allow_uninit_stack While 'mitigations=off' enables the first two conditions, it does not enable the latter two. 
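For reference, a condensed sketch of the four gating helpers after commit bc5bc309db45 (simplified from include/linux/bpf.h, illustrative rather than the exact definitions) shows why only the Spectre bypass pair consults the mitigations setting:

/* Only the spec-bypass checks honor 'mitigations=off'; pointer-leak and
 * uninit-stack permissions still require CAP_PERFMON (or CAP_SYS_ADMIN).
 */
static inline bool bpf_bypass_spec_v1(void)
{
	return cpu_mitigations_off() || perfmon_capable();
}

static inline bool bpf_bypass_spec_v4(void)
{
	return cpu_mitigations_off() || perfmon_capable();
}

static inline bool bpf_allow_ptr_leaks(void)
{
	return perfmon_capable();
}

static inline bool bpf_allow_uninit_stack(void)
{
	return perfmon_capable();
}
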
As a result, some test cases in 'test_progs -t verifier' that were expected to fail to run may run successfully, while others still fail but with different error messages. This makes it challenging to address them comprehensively. Moreover, in the future, we may introduce more fine-grained control over CPU mitigations, such as enabling only bypass_spec_v1 or bypass_spec_v4. Given the complexity of the situation, rather than fixing each broken test case individually, it's preferable to skip them when 'mitigations=off' is in effect and introduce specific test cases for the new 'mitigations=off' scenario. For instance, we can introduce new BTF declaration tags like '__failure__nospec', '__failure_nospecv1' and '__failure_nospecv4'. In this patch, the approach is to simply skip the broken test cases when 'mitigations=off' is enabled. The result of `test_progs -t verifier` as follows after this commit, Before this commit ================== - without 'mitigations=off' - kernel.unprivileged_bpf_disabled = 2 Summary: 74/948 PASSED, 388 SKIPPED, 0 FAILED - kernel.unprivileged_bpf_disabled = 0 Summary: 74/1336 PASSED, 0 SKIPPED, 0 FAILED <<<< - with 'mitigations=off' - kernel.unprivileged_bpf_disabled = 2 Summary: 74/948 PASSED, 388 SKIPPED, 0 FAILED - kernel.unprivileged_bpf_disabled = 0 Summary: 63/1276 PASSED, 0 SKIPPED, 11 FAILED <<<< 11 FAILED After this commit ================= - without 'mitigations=off' - kernel.unprivileged_bpf_disabled = 2 Summary: 74/948 PASSED, 388 SKIPPED, 0 FAILED - kernel.unprivileged_bpf_disabled = 0 Summary: 74/1336 PASSED, 0 SKIPPED, 0 FAILED <<<< - with this patch, with 'mitigations=off' - kernel.unprivileged_bpf_disabled = 2 Summary: 74/948 PASSED, 388 SKIPPED, 0 FAILED - kernel.unprivileged_bpf_disabled = 0 Summary: 74/948 PASSED, 388 SKIPPED, 0 FAILED <<<< SKIPPED Fixes: bc5bc309db45 ("bpf: Inherit system settings for CPU security mitigations") Reported-by: Alexei Starovoitov Signed-off-by: Yafang Shao Signed-off-by: Daniel Borkmann Acked-by: Yonghong Song Closes: https://lore.kernel.org/bpf/CAADnVQKUBJqg+hHtbLeeC2jhoJAWqnmRAzXW3hmUCNSV9kx4sQ@mail.gmail.com Link: https://lore.kernel.org/bpf/20231025031144.5508-1-laoar.shao@gmail.com --- tools/testing/selftests/bpf/unpriv_helpers.c | 33 +++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/unpriv_helpers.c b/tools/testing/selftests/bpf/unpriv_helpers.c index 2a6efbd0401e..b6d016461fb0 100644 --- a/tools/testing/selftests/bpf/unpriv_helpers.c +++ b/tools/testing/selftests/bpf/unpriv_helpers.c @@ -4,9 +4,40 @@ #include #include #include +#include +#include +#include #include "unpriv_helpers.h" +static bool get_mitigations_off(void) +{ + char cmdline[4096], *c; + int fd, ret = false; + + fd = open("/proc/cmdline", O_RDONLY); + if (fd < 0) { + perror("open /proc/cmdline"); + return false; + } + + if (read(fd, cmdline, sizeof(cmdline) - 1) < 0) { + perror("read /proc/cmdline"); + goto out; + } + + cmdline[sizeof(cmdline) - 1] = '\0'; + for (c = strtok(cmdline, " \n"); c; c = strtok(NULL, " \n")) { + if (strncmp(c, "mitigations=off", strlen(c))) + continue; + ret = true; + break; + } +out: + close(fd); + return ret; +} + bool get_unpriv_disabled(void) { bool disabled; @@ -22,5 +53,5 @@ bool get_unpriv_disabled(void) disabled = true; } - return disabled; + return disabled ? 
true : get_mitigations_off(); } From ea41b880cc85f0a992571f66e4554a69f7806246 Mon Sep 17 00:00:00 2001 From: Nikolay Aleksandrov Date: Thu, 26 Oct 2023 12:41:05 +0300 Subject: [PATCH 51/51] netkit: Remove explicit active/peer ptr initialization Remove the explicit NULLing of active/peer pointers and rely on the implicit one done at net device allocation. Suggested-by: Jiri Pirko Signed-off-by: Nikolay Aleksandrov Signed-off-by: Daniel Borkmann Reviewed-by: Jiri Pirko Acked-by: Daniel Borkmann Link: https://lore.kernel.org/bpf/20231026094106.1505892-2-razor@blackwall.org --- drivers/net/netkit.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/net/netkit.c b/drivers/net/netkit.c index 7e484f9fd3ae..5a0f86f38f09 100644 --- a/drivers/net/netkit.c +++ b/drivers/net/netkit.c @@ -371,8 +371,6 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev, nk->policy = default_peer; nk->mode = mode; bpf_mprog_bundle_init(&nk->bundle); - RCU_INIT_POINTER(nk->active, NULL); - RCU_INIT_POINTER(nk->peer, NULL); err = register_netdevice(peer); put_net(net); @@ -398,8 +396,6 @@ static int netkit_new_link(struct net *src_net, struct net_device *dev, nk->policy = default_prim; nk->mode = mode; bpf_mprog_bundle_init(&nk->bundle); - RCU_INIT_POINTER(nk->active, NULL); - RCU_INIT_POINTER(nk->peer, NULL); err = register_netdevice(dev); if (err < 0)