Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Alexei Starovoitov says:

====================
pull-request: bpf-next 2021-05-19

The following pull-request contains BPF updates for your *net-next* tree.

We've added 43 non-merge commits during the last 11 day(s) which contain
a total of 74 files changed, 3717 insertions(+), 578 deletions(-).

The main changes are:

1) Syscall program type, fd array, and light skeleton, from Alexei.

2) Stop emitting static variables in skeleton, from Andrii.

3) Low-level tc-bpf API, from Kumar.

4) Reduce verifier kmalloc/kfree churn, from Lorenz.
====================
Committed by David S. Miller on 2021-05-19 12:58:29 -07:00 (commit 7b16509b29)
74 changed files with 3724 additions and 585 deletions
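
For orientation, the centerpiece of item 1) is the new BPF_PROG_TYPE_SYSCALL program type: it runs via BPF_PROG_RUN and may call the new bpf_sys_bpf() helper to issue bpf() commands from inside the kernel. Below is a minimal, hypothetical BPF-side sketch of that idea; the section name, the context struct and the program are illustrative and assume helper declarations from headers updated for this series (such programs must also be loaded as sleepable, per the verifier change further down).

/* Hypothetical BPF-side sketch (not from this series): a syscall program
 * that creates an array map by re-entering the bpf() syscall through the
 * new bpf_sys_bpf() helper.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct args {
	int map_fd;		/* written back to the caller's ctx buffer */
};

SEC("syscall")
int create_map(struct args *ctx)
{
	/* kept in global data so the verifier sees initialized memory */
	static union bpf_attr map_create_attr = {
		.map_type    = BPF_MAP_TYPE_ARRAY,
		.key_size    = 4,
		.value_size  = 8,
		.max_entries = 1,
	};

	/* bpf_sys_bpf() calls __sys_bpf() with a KERNEL_BPFPTR attr,
	 * so no user-space copies are involved.
	 */
	ctx->map_fd = bpf_sys_bpf(BPF_MAP_CREATE, &map_create_attr,
				  sizeof(map_create_attr));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";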


@ -178,9 +178,6 @@ static bool is_addsub_imm(u32 imm)
return !(imm & ~0xfff) || !(imm & ~0xfff000);
}
/* Stack must be multiples of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)
/* Tail call offset to jump into */
#if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
#define PROLOGUE_OFFSET 8
@ -255,7 +252,8 @@ static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
emit(A64_BTI_J, ctx);
}
ctx->stack_size = STACK_ALIGN(prog->aux->stack_depth);
/* Stack must be multiples of 16B */
ctx->stack_size = round_up(prog->aux->stack_depth, 16);
/* Set up function call stack */
emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
@ -487,17 +485,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
break;
case BPF_ALU | BPF_DIV | BPF_X:
case BPF_ALU64 | BPF_DIV | BPF_X:
emit(A64_UDIV(is64, dst, dst, src), ctx);
break;
case BPF_ALU | BPF_MOD | BPF_X:
case BPF_ALU64 | BPF_MOD | BPF_X:
switch (BPF_OP(code)) {
case BPF_DIV:
emit(A64_UDIV(is64, dst, dst, src), ctx);
break;
case BPF_MOD:
emit(A64_UDIV(is64, tmp, dst, src), ctx);
emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
break;
}
emit(A64_UDIV(is64, tmp, dst, src), ctx);
emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
break;
case BPF_ALU | BPF_LSH | BPF_X:
case BPF_ALU64 | BPF_LSH | BPF_X:


@ -22,6 +22,7 @@
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/bpfptr.h>
struct bpf_verifier_env;
struct bpf_verifier_log;
@ -1428,7 +1429,7 @@ struct bpf_iter__bpf_map_elem {
int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info);
void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info);
bool bpf_iter_prog_supported(struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog);
int bpf_iter_new_fd(struct bpf_link *link);
bool bpf_link_is_iter(struct bpf_link *link);
struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop);
@ -1459,7 +1460,7 @@ int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value);
int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(void __user *uaddr, size_t expected_size,
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
size_t actual_size);
/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
@ -1479,8 +1480,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
}
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr,
union bpf_attr __user *uattr);
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr);
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
@ -1826,6 +1826,9 @@ static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
void bpf_map_offload_map_free(struct bpf_map *map);
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
#else
static inline int bpf_prog_offload_init(struct bpf_prog *prog,
union bpf_attr *attr)
@ -1851,6 +1854,13 @@ static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
static inline void bpf_map_offload_map_free(struct bpf_map *map)
{
}
static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
return -ENOTSUPP;
}
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
@ -1964,6 +1974,7 @@ extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
extern const struct bpf_func_proto bpf_task_storage_get_proto;
extern const struct bpf_func_proto bpf_task_storage_delete_proto;
extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
const struct bpf_func_proto *bpf_tracing_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);


@ -77,6 +77,8 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_LSM, lsm,
void *, void *)
#endif /* CONFIG_BPF_LSM */
#endif
BPF_PROG_TYPE(BPF_PROG_TYPE_SYSCALL, bpf_syscall,
void *, void *)
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)


@ -215,6 +215,13 @@ struct bpf_idx_pair {
u32 idx;
};
struct bpf_id_pair {
u32 old;
u32 cur;
};
/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
/* call stack tracking */
@ -418,6 +425,7 @@ struct bpf_verifier_env {
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
struct {
int *insn_state;
int *insn_stack;
@ -442,6 +450,7 @@ struct bpf_verifier_env {
u32 peak_states;
/* longest register parentage chain walked for liveness marking */
u32 longest_mark_read_walk;
bpfptr_t fd_array;
};
__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,

include/linux/bpfptr.h (new file, 75 lines)

@ -0,0 +1,75 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* A pointer that can point to either kernel or userspace memory. */
#ifndef _LINUX_BPFPTR_H
#define _LINUX_BPFPTR_H
#include <linux/sockptr.h>
typedef sockptr_t bpfptr_t;
static inline bool bpfptr_is_kernel(bpfptr_t bpfptr)
{
return bpfptr.is_kernel;
}
static inline bpfptr_t KERNEL_BPFPTR(void *p)
{
return (bpfptr_t) { .kernel = p, .is_kernel = true };
}
static inline bpfptr_t USER_BPFPTR(void __user *p)
{
return (bpfptr_t) { .user = p };
}
static inline bpfptr_t make_bpfptr(u64 addr, bool is_kernel)
{
if (is_kernel)
return KERNEL_BPFPTR((void*) (uintptr_t) addr);
else
return USER_BPFPTR(u64_to_user_ptr(addr));
}
static inline bool bpfptr_is_null(bpfptr_t bpfptr)
{
if (bpfptr_is_kernel(bpfptr))
return !bpfptr.kernel;
return !bpfptr.user;
}
static inline void bpfptr_add(bpfptr_t *bpfptr, size_t val)
{
if (bpfptr_is_kernel(*bpfptr))
bpfptr->kernel += val;
else
bpfptr->user += val;
}
static inline int copy_from_bpfptr_offset(void *dst, bpfptr_t src,
size_t offset, size_t size)
{
return copy_from_sockptr_offset(dst, (sockptr_t) src, offset, size);
}
static inline int copy_from_bpfptr(void *dst, bpfptr_t src, size_t size)
{
return copy_from_bpfptr_offset(dst, src, 0, size);
}
static inline int copy_to_bpfptr_offset(bpfptr_t dst, size_t offset,
const void *src, size_t size)
{
return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size);
}
static inline void *memdup_bpfptr(bpfptr_t src, size_t len)
{
return memdup_sockptr((sockptr_t) src, len);
}
static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count)
{
return strncpy_from_sockptr(dst, (sockptr_t) src, count);
}
#endif /* _LINUX_BPFPTR_H */
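
As a usage note (not part of the patch), the point of bpfptr_t is that one code path can serve both user-space and in-kernel callers. A hypothetical kernel-side helper using only the functions above might look like this; the function name is made up for illustration.

/* Hypothetical kernel-side sketch: wrap the source address in a bpfptr_t
 * and use the same copy helper for both user and kernel buffers.
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bpf.h>
#include <linux/bpfptr.h>

static int copy_attr_example(union bpf_attr *dst, u64 addr, u32 size,
			     bool from_kernel)
{
	bpfptr_t uattr = make_bpfptr(addr, from_kernel);
	u32 n = size < sizeof(*dst) ? size : sizeof(*dst);

	if (bpfptr_is_null(uattr))
		return -EINVAL;

	/* dispatches to memcpy() or copy_from_user() based on .is_kernel */
	if (copy_from_bpfptr(dst, uattr, n))
		return -EFAULT;
	return 0;
}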


@ -21,7 +21,7 @@ extern const struct file_operations btf_fops;
void btf_get(struct btf *btf);
void btf_put(struct btf *btf);
int btf_new_fd(const union bpf_attr *attr);
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr);
struct btf *btf_get_by_fd(int fd);
int btf_get_info_by_fd(const struct btf *btf,
const union bpf_attr *attr,


@ -126,8 +126,7 @@ int sk_msg_zerocopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
struct sk_msg *msg, u32 bytes);
int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, int flags,
long timeo, int *err);
int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo);
int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
int len, int flags);


@ -837,6 +837,7 @@ enum bpf_cmd {
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
BPF_PROG_RUN = BPF_PROG_TEST_RUN,
BPF_PROG_GET_NEXT_ID,
BPF_MAP_GET_NEXT_ID,
BPF_PROG_GET_FD_BY_ID,
@ -937,6 +938,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_EXT,
BPF_PROG_TYPE_LSM,
BPF_PROG_TYPE_SK_LOOKUP,
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
};
enum bpf_attach_type {
@ -1097,8 +1099,8 @@ enum bpf_link_type {
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
* the following extensions:
*
* insn[0].src_reg: BPF_PSEUDO_MAP_FD
* insn[0].imm: map fd
* insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX]
* insn[0].imm: map fd or fd_idx
* insn[1].imm: 0
* insn[0].off: 0
* insn[1].off: 0
@ -1106,15 +1108,19 @@ enum bpf_link_type {
* verifier type: CONST_PTR_TO_MAP
*/
#define BPF_PSEUDO_MAP_FD 1
/* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE
* insn[0].imm: map fd
#define BPF_PSEUDO_MAP_IDX 5
/* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE
* insn[0].imm: map fd or fd_idx
* insn[1].imm: offset into value
* insn[0].off: 0
* insn[1].off: 0
* ldimm64 rewrite: address of map[0]+offset
* verifier type: PTR_TO_MAP_VALUE
*/
#define BPF_PSEUDO_MAP_VALUE 2
#define BPF_PSEUDO_MAP_VALUE 2
#define BPF_PSEUDO_MAP_IDX_VALUE 6
/* insn[0].src_reg: BPF_PSEUDO_BTF_ID
* insn[0].imm: kernel btd id of VAR
* insn[1].imm: 0
@ -1314,6 +1320,8 @@ union bpf_attr {
/* or valid module BTF object fd or 0 to attach to vmlinux */
__u32 attach_btf_obj_fd;
};
__u32 :32; /* pad */
__aligned_u64 fd_array; /* array of FDs */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@ -4735,6 +4743,24 @@ union bpf_attr {
* be zero-terminated except when **str_size** is 0.
*
* Or **-EBUSY** if the per-CPU memory copy buffer is busy.
*
* long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
* Description
* Execute bpf syscall with given arguments.
* Return
* A syscall result.
*
* long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
* Description
* Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
* Return
* Returns btf_id and btf_obj_fd in lower and upper 32 bits.
*
* long bpf_sys_close(u32 fd)
* Description
* Execute close syscall for given FD.
* Return
* A syscall result.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@ -4903,6 +4929,9 @@ union bpf_attr {
FN(check_mtu), \
FN(for_each_map_elem), \
FN(snprintf), \
FN(sys_bpf), \
FN(btf_find_by_name_kind), \
FN(sys_close), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
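
To illustrate the fd_array/fd_idx convention described above, here is a hypothetical user-space loader sketch: instead of embedding a map fd in the ldimm64 immediate, the loader places the fd in an array passed via the new fd_array attribute and stores the array index in insn[0].imm with src_reg set to BPF_PSEUDO_MAP_IDX. The helper ptr_to_u64() and the tiny XDP program are made up for illustration.

/* Hypothetical loader sketch (not from this series). */
#include <linux/bpf.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

static __u64 ptr_to_u64(const void *p)
{
	return (__u64)(unsigned long)p;
}

static int prog_load_with_fd_array(int map_fd)
{
	int fds[1] = { map_fd };
	struct bpf_insn insns[] = {
		/* r1 = map_by_idx(0): imm is an index into fds[], not an fd */
		{ .code = BPF_LD | BPF_DW | BPF_IMM, .dst_reg = BPF_REG_1,
		  .src_reg = BPF_PSEUDO_MAP_IDX, .imm = 0 },
		{ .code = 0 },	/* second half of the 16-byte ldimm64, imm must be 0 */
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_XDP;
	attr.insns     = ptr_to_u64(insns);
	attr.insn_cnt  = sizeof(insns) / sizeof(insns[0]);
	attr.license   = ptr_to_u64("GPL");
	attr.fd_array  = ptr_to_u64(fds);	/* new attribute in this series */

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}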


@ -473,15 +473,16 @@ bool bpf_link_is_iter(struct bpf_link *link)
return link->ops == &bpf_iter_link_lops;
}
int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
struct bpf_prog *prog)
{
union bpf_iter_link_info __user *ulinfo;
struct bpf_link_primer link_primer;
struct bpf_iter_target_info *tinfo;
union bpf_iter_link_info linfo;
struct bpf_iter_link *link;
u32 prog_btf_id, linfo_len;
bool existed = false;
bpfptr_t ulinfo;
int err;
if (attr->link_create.target_fd || attr->link_create.flags)
@ -489,18 +490,18 @@ int bpf_iter_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
memset(&linfo, 0, sizeof(union bpf_iter_link_info));
ulinfo = u64_to_user_ptr(attr->link_create.iter_info);
ulinfo = make_bpfptr(attr->link_create.iter_info, uattr.is_kernel);
linfo_len = attr->link_create.iter_info_len;
if (!ulinfo ^ !linfo_len)
if (bpfptr_is_null(ulinfo) ^ !linfo_len)
return -EINVAL;
if (ulinfo) {
if (!bpfptr_is_null(ulinfo)) {
err = bpf_check_uarg_tail_zero(ulinfo, sizeof(linfo),
linfo_len);
if (err)
return err;
linfo_len = min_t(u32, linfo_len, sizeof(linfo));
if (copy_from_user(&linfo, ulinfo, linfo_len))
if (copy_from_bpfptr(&linfo, ulinfo, linfo_len))
return -EFAULT;
}


@ -4257,7 +4257,7 @@ static int btf_parse_hdr(struct btf_verifier_env *env)
return 0;
}
static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
static struct btf *btf_parse(bpfptr_t btf_data, u32 btf_data_size,
u32 log_level, char __user *log_ubuf, u32 log_size)
{
struct btf_verifier_env *env = NULL;
@ -4306,7 +4306,7 @@ static struct btf *btf_parse(void __user *btf_data, u32 btf_data_size,
btf->data = data;
btf->data_size = btf_data_size;
if (copy_from_user(data, btf_data, btf_data_size)) {
if (copy_from_bpfptr(data, btf_data, btf_data_size)) {
err = -EFAULT;
goto errout;
}
@ -5780,12 +5780,12 @@ static int __btf_new_fd(struct btf *btf)
return anon_inode_getfd("btf", &btf_fops, btf, O_RDONLY | O_CLOEXEC);
}
int btf_new_fd(const union bpf_attr *attr)
int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr)
{
struct btf *btf;
int ret;
btf = btf_parse(u64_to_user_ptr(attr->btf),
btf = btf_parse(make_bpfptr(attr->btf, uattr.is_kernel),
attr->btf_size, attr->btf_log_level,
u64_to_user_ptr(attr->btf_log_buf),
attr->btf_log_size);
@ -6085,3 +6085,65 @@ struct module *btf_try_get_module(const struct btf *btf)
return res;
}
BPF_CALL_4(bpf_btf_find_by_name_kind, char *, name, int, name_sz, u32, kind, int, flags)
{
struct btf *btf;
long ret;
if (flags)
return -EINVAL;
if (name_sz <= 1 || name[name_sz - 1])
return -EINVAL;
btf = bpf_get_btf_vmlinux();
if (IS_ERR(btf))
return PTR_ERR(btf);
ret = btf_find_by_name_kind(btf, name, kind);
/* ret is never zero, since btf_find_by_name_kind returns
* positive btf_id or negative error.
*/
if (ret < 0) {
struct btf *mod_btf;
int id;
/* If name is not found in vmlinux's BTF then search in module's BTFs */
spin_lock_bh(&btf_idr_lock);
idr_for_each_entry(&btf_idr, mod_btf, id) {
if (!btf_is_module(mod_btf))
continue;
/* linear search could be slow hence unlock/lock
* the IDR to avoid holding it for too long
*/
btf_get(mod_btf);
spin_unlock_bh(&btf_idr_lock);
ret = btf_find_by_name_kind(mod_btf, name, kind);
if (ret > 0) {
int btf_obj_fd;
btf_obj_fd = __btf_new_fd(mod_btf);
if (btf_obj_fd < 0) {
btf_put(mod_btf);
return btf_obj_fd;
}
return ret | (((u64)btf_obj_fd) << 32);
}
spin_lock_bh(&btf_idr_lock);
btf_put(mod_btf);
}
spin_unlock_bh(&btf_idr_lock);
}
return ret;
}
const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
.func = bpf_btf_find_by_name_kind,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_MEM,
.arg2_type = ARG_CONST_SIZE,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_ANYTHING,
};
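
Since bpf_btf_find_by_name_kind() packs two results into one return value (btf_id in the lower 32 bits, module BTF object fd in the upper 32 bits, 0 for vmlinux), here is a short hypothetical caller-side sketch; the looked-up name and the surrounding syscall program are illustrative, and helper declarations are assumed to come from headers updated for this series.

/* Hypothetical BPF-side sketch decoding the combined return value. */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <bpf/bpf_helpers.h>

SEC("syscall")
int find_func(void *ctx)
{
	char name[] = "bpf_fentry_test1";	/* any kernel function name */
	__s32 btf_id, btf_obj_fd;
	long res;

	/* name_sz must include the terminating NUL */
	res = bpf_btf_find_by_name_kind(name, sizeof(name), BTF_KIND_FUNC, 0);
	if (res < 0)
		return res;			/* not found or lookup error */

	btf_id     = (__s32)res;		/* lower 32 bits: BTF type id */
	btf_obj_fd = res >> 32;			/* upper 32 bits: module BTF fd */
	if (btf_obj_fd)
		bpf_sys_close(btf_obj_fd);	/* drop the module BTF fd once consumed */
	return btf_id;
}

char LICENSE[] SEC("license") = "GPL";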


@ -72,11 +72,10 @@ static const struct bpf_map_ops * const bpf_map_types[] = {
* copy_from_user() call. However, this is not a concern since this function is
* meant to be a future-proofing of bits.
*/
int bpf_check_uarg_tail_zero(void __user *uaddr,
int bpf_check_uarg_tail_zero(bpfptr_t uaddr,
size_t expected_size,
size_t actual_size)
{
unsigned char __user *addr = uaddr + expected_size;
int res;
if (unlikely(actual_size > PAGE_SIZE)) /* silly large */
@ -85,7 +84,12 @@ int bpf_check_uarg_tail_zero(void __user *uaddr,
if (actual_size <= expected_size)
return 0;
res = check_zeroed_user(addr, actual_size - expected_size);
if (uaddr.is_kernel)
res = memchr_inv(uaddr.kernel + expected_size, 0,
actual_size - expected_size) == NULL;
else
res = check_zeroed_user(uaddr.user + expected_size,
actual_size - expected_size);
if (res < 0)
return res;
return res ? 0 : -E2BIG;
@ -1004,6 +1008,17 @@ static void *__bpf_copy_key(void __user *ukey, u64 key_size)
return NULL;
}
static void *___bpf_copy_key(bpfptr_t ukey, u64 key_size)
{
if (key_size)
return memdup_bpfptr(ukey, key_size);
if (!bpfptr_is_null(ukey))
return ERR_PTR(-EINVAL);
return NULL;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD flags
@ -1074,10 +1089,10 @@ static int map_lookup_elem(union bpf_attr *attr)
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags
static int map_update_elem(union bpf_attr *attr)
static int map_update_elem(union bpf_attr *attr, bpfptr_t uattr)
{
void __user *ukey = u64_to_user_ptr(attr->key);
void __user *uvalue = u64_to_user_ptr(attr->value);
bpfptr_t ukey = make_bpfptr(attr->key, uattr.is_kernel);
bpfptr_t uvalue = make_bpfptr(attr->value, uattr.is_kernel);
int ufd = attr->map_fd;
struct bpf_map *map;
void *key, *value;
@ -1103,7 +1118,7 @@ static int map_update_elem(union bpf_attr *attr)
goto err_put;
}
key = __bpf_copy_key(ukey, map->key_size);
key = ___bpf_copy_key(ukey, map->key_size);
if (IS_ERR(key)) {
err = PTR_ERR(key);
goto err_put;
@ -1123,7 +1138,7 @@ static int map_update_elem(union bpf_attr *attr)
goto free_key;
err = -EFAULT;
if (copy_from_user(value, uvalue, value_size) != 0)
if (copy_from_bpfptr(value, uvalue, value_size) != 0)
goto free_value;
err = bpf_map_update_value(map, f, key, value, attr->flags);
@ -2014,6 +2029,7 @@ bpf_prog_load_check_attach(enum bpf_prog_type prog_type,
if (expected_attach_type == BPF_SK_LOOKUP)
return 0;
return -EINVAL;
case BPF_PROG_TYPE_SYSCALL:
case BPF_PROG_TYPE_EXT:
if (expected_attach_type)
return -EINVAL;
@ -2073,9 +2089,9 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD attach_prog_fd
#define BPF_PROG_LOAD_LAST_FIELD fd_array
static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr)
{
enum bpf_prog_type type = attr->prog_type;
struct bpf_prog *prog, *dst_prog = NULL;
@ -2100,8 +2116,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
return -EPERM;
/* copy eBPF program license from user space */
if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
sizeof(license) - 1) < 0)
if (strncpy_from_bpfptr(license,
make_bpfptr(attr->license, uattr.is_kernel),
sizeof(license) - 1) < 0)
return -EFAULT;
license[sizeof(license) - 1] = 0;
@ -2185,8 +2202,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
prog->len = attr->insn_cnt;
err = -EFAULT;
if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
bpf_prog_insn_size(prog)) != 0)
if (copy_from_bpfptr(prog->insns,
make_bpfptr(attr->insns, uattr.is_kernel),
bpf_prog_insn_size(prog)) != 0)
goto free_prog_sec;
prog->orig_prog = NULL;
@ -3422,7 +3440,7 @@ static int bpf_prog_get_info_by_fd(struct file *file,
u32 ulen;
int err;
err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
if (err)
return err;
info_len = min_t(u32, sizeof(info), info_len);
@ -3701,7 +3719,7 @@ static int bpf_map_get_info_by_fd(struct file *file,
u32 info_len = attr->info.info_len;
int err;
err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
if (err)
return err;
info_len = min_t(u32, sizeof(info), info_len);
@ -3744,7 +3762,7 @@ static int bpf_btf_get_info_by_fd(struct file *file,
u32 info_len = attr->info.info_len;
int err;
err = bpf_check_uarg_tail_zero(uinfo, sizeof(*uinfo), info_len);
err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(*uinfo), info_len);
if (err)
return err;
@ -3761,7 +3779,7 @@ static int bpf_link_get_info_by_fd(struct file *file,
u32 info_len = attr->info.info_len;
int err;
err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
err = bpf_check_uarg_tail_zero(USER_BPFPTR(uinfo), sizeof(info), info_len);
if (err)
return err;
info_len = min_t(u32, sizeof(info), info_len);
@ -3824,7 +3842,7 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
#define BPF_BTF_LOAD_LAST_FIELD btf_log_level
static int bpf_btf_load(const union bpf_attr *attr)
static int bpf_btf_load(const union bpf_attr *attr, bpfptr_t uattr)
{
if (CHECK_ATTR(BPF_BTF_LOAD))
return -EINVAL;
@ -3832,7 +3850,7 @@ static int bpf_btf_load(const union bpf_attr *attr)
if (!bpf_capable())
return -EPERM;
return btf_new_fd(attr);
return btf_new_fd(attr, uattr);
}
#define BPF_BTF_GET_FD_BY_ID_LAST_FIELD btf_id
@ -4022,13 +4040,14 @@ static int bpf_map_do_batch(const union bpf_attr *attr,
return err;
}
static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
static int tracing_bpf_link_attach(const union bpf_attr *attr, bpfptr_t uattr,
struct bpf_prog *prog)
{
if (attr->link_create.attach_type != prog->expected_attach_type)
return -EINVAL;
if (prog->expected_attach_type == BPF_TRACE_ITER)
return bpf_iter_link_attach(attr, prog);
return bpf_iter_link_attach(attr, uattr, prog);
else if (prog->type == BPF_PROG_TYPE_EXT)
return bpf_tracing_prog_attach(prog,
attr->link_create.target_fd,
@ -4037,7 +4056,7 @@ static int tracing_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *
}
#define BPF_LINK_CREATE_LAST_FIELD link_create.iter_info_len
static int link_create(union bpf_attr *attr)
static int link_create(union bpf_attr *attr, bpfptr_t uattr)
{
enum bpf_prog_type ptype;
struct bpf_prog *prog;
@ -4056,7 +4075,7 @@ static int link_create(union bpf_attr *attr)
goto out;
if (prog->type == BPF_PROG_TYPE_EXT) {
ret = tracing_bpf_link_attach(attr, prog);
ret = tracing_bpf_link_attach(attr, uattr, prog);
goto out;
}
@ -4077,7 +4096,7 @@ static int link_create(union bpf_attr *attr)
ret = cgroup_bpf_link_attach(attr, prog);
break;
case BPF_PROG_TYPE_TRACING:
ret = tracing_bpf_link_attach(attr, prog);
ret = tracing_bpf_link_attach(attr, uattr, prog);
break;
case BPF_PROG_TYPE_FLOW_DISSECTOR:
case BPF_PROG_TYPE_SK_LOOKUP:
@ -4365,7 +4384,7 @@ static int bpf_prog_bind_map(union bpf_attr *attr)
return ret;
}
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
static int __sys_bpf(int cmd, bpfptr_t uattr, unsigned int size)
{
union bpf_attr attr;
int err;
@ -4380,7 +4399,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
/* copy attributes from user space, may be less than sizeof(bpf_attr) */
memset(&attr, 0, sizeof(attr));
if (copy_from_user(&attr, uattr, size) != 0)
if (copy_from_bpfptr(&attr, uattr, size) != 0)
return -EFAULT;
err = security_bpf(cmd, &attr, size);
@ -4395,7 +4414,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
err = map_lookup_elem(&attr);
break;
case BPF_MAP_UPDATE_ELEM:
err = map_update_elem(&attr);
err = map_update_elem(&attr, uattr);
break;
case BPF_MAP_DELETE_ELEM:
err = map_delete_elem(&attr);
@ -4422,21 +4441,21 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
err = bpf_prog_detach(&attr);
break;
case BPF_PROG_QUERY:
err = bpf_prog_query(&attr, uattr);
err = bpf_prog_query(&attr, uattr.user);
break;
case BPF_PROG_TEST_RUN:
err = bpf_prog_test_run(&attr, uattr);
err = bpf_prog_test_run(&attr, uattr.user);
break;
case BPF_PROG_GET_NEXT_ID:
err = bpf_obj_get_next_id(&attr, uattr,
err = bpf_obj_get_next_id(&attr, uattr.user,
&prog_idr, &prog_idr_lock);
break;
case BPF_MAP_GET_NEXT_ID:
err = bpf_obj_get_next_id(&attr, uattr,
err = bpf_obj_get_next_id(&attr, uattr.user,
&map_idr, &map_idr_lock);
break;
case BPF_BTF_GET_NEXT_ID:
err = bpf_obj_get_next_id(&attr, uattr,
err = bpf_obj_get_next_id(&attr, uattr.user,
&btf_idr, &btf_idr_lock);
break;
case BPF_PROG_GET_FD_BY_ID:
@ -4446,38 +4465,38 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
err = bpf_map_get_fd_by_id(&attr);
break;
case BPF_OBJ_GET_INFO_BY_FD:
err = bpf_obj_get_info_by_fd(&attr, uattr);
err = bpf_obj_get_info_by_fd(&attr, uattr.user);
break;
case BPF_RAW_TRACEPOINT_OPEN:
err = bpf_raw_tracepoint_open(&attr);
break;
case BPF_BTF_LOAD:
err = bpf_btf_load(&attr);
err = bpf_btf_load(&attr, uattr);
break;
case BPF_BTF_GET_FD_BY_ID:
err = bpf_btf_get_fd_by_id(&attr);
break;
case BPF_TASK_FD_QUERY:
err = bpf_task_fd_query(&attr, uattr);
err = bpf_task_fd_query(&attr, uattr.user);
break;
case BPF_MAP_LOOKUP_AND_DELETE_ELEM:
err = map_lookup_and_delete_elem(&attr);
break;
case BPF_MAP_LOOKUP_BATCH:
err = bpf_map_do_batch(&attr, uattr, BPF_MAP_LOOKUP_BATCH);
err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_LOOKUP_BATCH);
break;
case BPF_MAP_LOOKUP_AND_DELETE_BATCH:
err = bpf_map_do_batch(&attr, uattr,
err = bpf_map_do_batch(&attr, uattr.user,
BPF_MAP_LOOKUP_AND_DELETE_BATCH);
break;
case BPF_MAP_UPDATE_BATCH:
err = bpf_map_do_batch(&attr, uattr, BPF_MAP_UPDATE_BATCH);
err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_UPDATE_BATCH);
break;
case BPF_MAP_DELETE_BATCH:
err = bpf_map_do_batch(&attr, uattr, BPF_MAP_DELETE_BATCH);
err = bpf_map_do_batch(&attr, uattr.user, BPF_MAP_DELETE_BATCH);
break;
case BPF_LINK_CREATE:
err = link_create(&attr);
err = link_create(&attr, uattr);
break;
case BPF_LINK_UPDATE:
err = link_update(&attr);
@ -4486,7 +4505,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
err = bpf_link_get_fd_by_id(&attr);
break;
case BPF_LINK_GET_NEXT_ID:
err = bpf_obj_get_next_id(&attr, uattr,
err = bpf_obj_get_next_id(&attr, uattr.user,
&link_idr, &link_idr_lock);
break;
case BPF_ENABLE_STATS:
@ -4508,3 +4527,94 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
return err;
}
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
return __sys_bpf(cmd, USER_BPFPTR(uattr), size);
}
static bool syscall_prog_is_valid_access(int off, int size,
enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info)
{
if (off < 0 || off >= U16_MAX)
return false;
if (off % size != 0)
return false;
return true;
}
BPF_CALL_3(bpf_sys_bpf, int, cmd, void *, attr, u32, attr_size)
{
switch (cmd) {
case BPF_MAP_CREATE:
case BPF_MAP_UPDATE_ELEM:
case BPF_MAP_FREEZE:
case BPF_PROG_LOAD:
case BPF_BTF_LOAD:
break;
/* case BPF_PROG_TEST_RUN:
* is not part of this list to prevent recursive test_run
*/
default:
return -EINVAL;
}
return __sys_bpf(cmd, KERNEL_BPFPTR(attr), attr_size);
}
static const struct bpf_func_proto bpf_sys_bpf_proto = {
.func = bpf_sys_bpf,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
.arg2_type = ARG_PTR_TO_MEM,
.arg3_type = ARG_CONST_SIZE,
};
const struct bpf_func_proto * __weak
tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
return bpf_base_func_proto(func_id);
}
BPF_CALL_1(bpf_sys_close, u32, fd)
{
/* When bpf program calls this helper there should not be
* an fdget() without matching completed fdput().
* This helper is allowed in the following callchain only:
* sys_bpf->prog_test_run->bpf_prog->bpf_sys_close
*/
return close_fd(fd);
}
static const struct bpf_func_proto bpf_sys_close_proto = {
.func = bpf_sys_close,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_ANYTHING,
};
static const struct bpf_func_proto *
syscall_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_sys_bpf:
return &bpf_sys_bpf_proto;
case BPF_FUNC_btf_find_by_name_kind:
return &bpf_btf_find_by_name_kind_proto;
case BPF_FUNC_sys_close:
return &bpf_sys_close_proto;
default:
return tracing_prog_func_proto(func_id, prog);
}
}
const struct bpf_verifier_ops bpf_syscall_verifier_ops = {
.get_func_proto = syscall_prog_func_proto,
.is_valid_access = syscall_prog_is_valid_access,
};
const struct bpf_prog_ops bpf_syscall_prog_ops = {
.test_run = bpf_prog_test_run_syscall,
};


@ -737,81 +737,104 @@ static void print_verifier_state(struct bpf_verifier_env *env,
verbose(env, "\n");
}
#define COPY_STATE_FN(NAME, COUNT, FIELD, SIZE) \
static int copy_##NAME##_state(struct bpf_func_state *dst, \
const struct bpf_func_state *src) \
{ \
if (!src->FIELD) \
return 0; \
if (WARN_ON_ONCE(dst->COUNT < src->COUNT)) { \
/* internal bug, make state invalid to reject the program */ \
memset(dst, 0, sizeof(*dst)); \
return -EFAULT; \
} \
memcpy(dst->FIELD, src->FIELD, \
sizeof(*src->FIELD) * (src->COUNT / SIZE)); \
return 0; \
}
/* copy_reference_state() */
COPY_STATE_FN(reference, acquired_refs, refs, 1)
/* copy_stack_state() */
COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef COPY_STATE_FN
#define REALLOC_STATE_FN(NAME, COUNT, FIELD, SIZE) \
static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
bool copy_old) \
{ \
u32 old_size = state->COUNT; \
struct bpf_##NAME##_state *new_##FIELD; \
int slot = size / SIZE; \
\
if (size <= old_size || !size) { \
if (copy_old) \
return 0; \
state->COUNT = slot * SIZE; \
if (!size && old_size) { \
kfree(state->FIELD); \
state->FIELD = NULL; \
} \
return 0; \
} \
new_##FIELD = kmalloc_array(slot, sizeof(struct bpf_##NAME##_state), \
GFP_KERNEL); \
if (!new_##FIELD) \
return -ENOMEM; \
if (copy_old) { \
if (state->FIELD) \
memcpy(new_##FIELD, state->FIELD, \
sizeof(*new_##FIELD) * (old_size / SIZE)); \
memset(new_##FIELD + old_size / SIZE, 0, \
sizeof(*new_##FIELD) * (size - old_size) / SIZE); \
} \
state->COUNT = slot * SIZE; \
kfree(state->FIELD); \
state->FIELD = new_##FIELD; \
return 0; \
}
/* realloc_reference_state() */
REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
/* realloc_stack_state() */
REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
#undef REALLOC_STATE_FN
/* do_check() starts with zero-sized stack in struct bpf_verifier_state to
* make it consume minimal amount of memory. check_stack_write() access from
* the program calls into realloc_func_state() to grow the stack size.
* Note there is a non-zero 'parent' pointer inside bpf_verifier_state
* which realloc_stack_state() copies over. It points to previous
* bpf_verifier_state which is never reallocated.
/* copy array src of length n * size bytes to dst. dst is reallocated if it's too
* small to hold src. This is different from krealloc since we don't want to preserve
* the contents of dst.
*
* Leaves dst untouched if src is NULL or length is zero. Returns NULL if memory could
* not be allocated.
*/
static int realloc_func_state(struct bpf_func_state *state, int stack_size,
int refs_size, bool copy_old)
static void *copy_array(void *dst, const void *src, size_t n, size_t size, gfp_t flags)
{
int err = realloc_reference_state(state, refs_size, copy_old);
if (err)
return err;
return realloc_stack_state(state, stack_size, copy_old);
size_t bytes;
if (ZERO_OR_NULL_PTR(src))
goto out;
if (unlikely(check_mul_overflow(n, size, &bytes)))
return NULL;
if (ksize(dst) < bytes) {
kfree(dst);
dst = kmalloc_track_caller(bytes, flags);
if (!dst)
return NULL;
}
memcpy(dst, src, bytes);
out:
return dst ? dst : ZERO_SIZE_PTR;
}
/* resize an array from old_n items to new_n items. the array is reallocated if it's too
* small to hold new_n items. new items are zeroed out if the array grows.
*
* Contrary to krealloc_array, does not free arr if new_n is zero.
*/
static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
{
if (!new_n || old_n == new_n)
goto out;
arr = krealloc_array(arr, new_n, size, GFP_KERNEL);
if (!arr)
return NULL;
if (new_n > old_n)
memset(arr + old_n * size, 0, (new_n - old_n) * size);
out:
return arr ? arr : ZERO_SIZE_PTR;
}
static int copy_reference_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
sizeof(struct bpf_reference_state), GFP_KERNEL);
if (!dst->refs)
return -ENOMEM;
dst->acquired_refs = src->acquired_refs;
return 0;
}
static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_state *src)
{
size_t n = src->allocated_stack / BPF_REG_SIZE;
dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
GFP_KERNEL);
if (!dst->stack)
return -ENOMEM;
dst->allocated_stack = src->allocated_stack;
return 0;
}
static int resize_reference_state(struct bpf_func_state *state, size_t n)
{
state->refs = realloc_array(state->refs, state->acquired_refs, n,
sizeof(struct bpf_reference_state));
if (!state->refs)
return -ENOMEM;
state->acquired_refs = n;
return 0;
}
static int grow_stack_state(struct bpf_func_state *state, int size)
{
size_t old_n = state->allocated_stack / BPF_REG_SIZE, n = size / BPF_REG_SIZE;
if (old_n >= n)
return 0;
state->stack = realloc_array(state->stack, old_n, n, sizeof(struct bpf_stack_state));
if (!state->stack)
return -ENOMEM;
state->allocated_stack = size;
return 0;
}
/* Acquire a pointer id from the env and update the state->refs to include
@ -825,7 +848,7 @@ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
int new_ofs = state->acquired_refs;
int id, err;
err = realloc_reference_state(state, state->acquired_refs + 1, true);
err = resize_reference_state(state, state->acquired_refs + 1);
if (err)
return err;
id = ++env->id_gen;
@ -854,18 +877,6 @@ static int release_reference_state(struct bpf_func_state *state, int ptr_id)
return -EINVAL;
}
static int transfer_reference_state(struct bpf_func_state *dst,
struct bpf_func_state *src)
{
int err = realloc_reference_state(dst, src->acquired_refs, false);
if (err)
return err;
err = copy_reference_state(dst, src);
if (err)
return err;
return 0;
}
static void free_func_state(struct bpf_func_state *state)
{
if (!state)
@ -904,10 +915,6 @@ static int copy_func_state(struct bpf_func_state *dst,
{
int err;
err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
false);
if (err)
return err;
memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
err = copy_reference_state(dst, src);
if (err)
@ -919,16 +926,13 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
const struct bpf_verifier_state *src)
{
struct bpf_func_state *dst;
u32 jmp_sz = sizeof(struct bpf_idx_pair) * src->jmp_history_cnt;
int i, err;
if (dst_state->jmp_history_cnt < src->jmp_history_cnt) {
kfree(dst_state->jmp_history);
dst_state->jmp_history = kmalloc(jmp_sz, GFP_USER);
if (!dst_state->jmp_history)
return -ENOMEM;
}
memcpy(dst_state->jmp_history, src->jmp_history, jmp_sz);
dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
GFP_USER);
if (!dst_state->jmp_history)
return -ENOMEM;
dst_state->jmp_history_cnt = src->jmp_history_cnt;
/* if dst has more stack frames then src frame, free them */
@ -2590,8 +2594,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
u32 dst_reg = env->prog->insnsi[insn_idx].dst_reg;
struct bpf_reg_state *reg = NULL;
err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
state->acquired_refs, true);
err = grow_stack_state(state, round_up(slot + 1, BPF_REG_SIZE));
if (err)
return err;
/* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
@ -2753,8 +2756,7 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
if (value_reg && register_is_null(value_reg))
writing_zero = true;
err = realloc_func_state(state, round_up(-min_off, BPF_REG_SIZE),
state->acquired_refs, true);
err = grow_stack_state(state, round_up(-min_off, BPF_REG_SIZE));
if (err)
return err;
@ -5629,7 +5631,7 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn
subprog /* subprog number within this prog */);
/* Transfer references to the callee */
err = transfer_reference_state(callee, caller);
err = copy_reference_state(callee, caller);
if (err)
return err;
@ -5780,7 +5782,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
}
/* Transfer references to the caller */
err = transfer_reference_state(caller, callee);
err = copy_reference_state(caller, callee);
if (err)
return err;
@ -8913,12 +8915,14 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
mark_reg_known_zero(env, regs, insn->dst_reg);
dst_reg->map_ptr = map;
if (insn->src_reg == BPF_PSEUDO_MAP_VALUE) {
if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
dst_reg->type = PTR_TO_MAP_VALUE;
dst_reg->off = aux->map_off;
if (map_value_has_spin_lock(map))
dst_reg->id = ++env->id_gen;
} else if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
} else if (insn->src_reg == BPF_PSEUDO_MAP_FD ||
insn->src_reg == BPF_PSEUDO_MAP_IDX) {
dst_reg->type = CONST_PTR_TO_MAP;
} else {
verbose(env, "bpf verifier is misconfigured\n");
@ -9434,7 +9438,7 @@ static int check_abnormal_return(struct bpf_verifier_env *env)
static int check_btf_func(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
bpfptr_t uattr)
{
const struct btf_type *type, *func_proto, *ret_type;
u32 i, nfuncs, urec_size, min_size;
@ -9443,7 +9447,7 @@ static int check_btf_func(struct bpf_verifier_env *env,
struct bpf_func_info_aux *info_aux = NULL;
struct bpf_prog *prog;
const struct btf *btf;
void __user *urecord;
bpfptr_t urecord;
u32 prev_offset = 0;
bool scalar_return;
int ret = -ENOMEM;
@ -9471,7 +9475,7 @@ static int check_btf_func(struct bpf_verifier_env *env,
prog = env->prog;
btf = prog->aux->btf;
urecord = u64_to_user_ptr(attr->func_info);
urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
min_size = min_t(u32, krec_size, urec_size);
krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
@ -9489,13 +9493,15 @@ static int check_btf_func(struct bpf_verifier_env *env,
/* set the size kernel expects so loader can zero
* out the rest of the record.
*/
if (put_user(min_size, &uattr->func_info_rec_size))
if (copy_to_bpfptr_offset(uattr,
offsetof(union bpf_attr, func_info_rec_size),
&min_size, sizeof(min_size)))
ret = -EFAULT;
}
goto err_free;
}
if (copy_from_user(&krecord[i], urecord, min_size)) {
if (copy_from_bpfptr(&krecord[i], urecord, min_size)) {
ret = -EFAULT;
goto err_free;
}
@ -9547,7 +9553,7 @@ static int check_btf_func(struct bpf_verifier_env *env,
}
prev_offset = krecord[i].insn_off;
urecord += urec_size;
bpfptr_add(&urecord, urec_size);
}
prog->aux->func_info = krecord;
@ -9579,14 +9585,14 @@ static void adjust_btf_func(struct bpf_verifier_env *env)
static int check_btf_line(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
bpfptr_t uattr)
{
u32 i, s, nr_linfo, ncopy, expected_size, rec_size, prev_offset = 0;
struct bpf_subprog_info *sub;
struct bpf_line_info *linfo;
struct bpf_prog *prog;
const struct btf *btf;
void __user *ulinfo;
bpfptr_t ulinfo;
int err;
nr_linfo = attr->line_info_cnt;
@ -9612,7 +9618,7 @@ static int check_btf_line(struct bpf_verifier_env *env,
s = 0;
sub = env->subprog_info;
ulinfo = u64_to_user_ptr(attr->line_info);
ulinfo = make_bpfptr(attr->line_info, uattr.is_kernel);
expected_size = sizeof(struct bpf_line_info);
ncopy = min_t(u32, expected_size, rec_size);
for (i = 0; i < nr_linfo; i++) {
@ -9620,14 +9626,15 @@ static int check_btf_line(struct bpf_verifier_env *env,
if (err) {
if (err == -E2BIG) {
verbose(env, "nonzero tailing record in line_info");
if (put_user(expected_size,
&uattr->line_info_rec_size))
if (copy_to_bpfptr_offset(uattr,
offsetof(union bpf_attr, line_info_rec_size),
&expected_size, sizeof(expected_size)))
err = -EFAULT;
}
goto err_free;
}
if (copy_from_user(&linfo[i], ulinfo, ncopy)) {
if (copy_from_bpfptr(&linfo[i], ulinfo, ncopy)) {
err = -EFAULT;
goto err_free;
}
@ -9679,7 +9686,7 @@ static int check_btf_line(struct bpf_verifier_env *env,
}
prev_offset = linfo[i].insn_off;
ulinfo += rec_size;
bpfptr_add(&ulinfo, rec_size);
}
if (s != env->subprog_cnt) {
@ -9701,7 +9708,7 @@ static int check_btf_line(struct bpf_verifier_env *env,
static int check_btf_info(struct bpf_verifier_env *env,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
bpfptr_t uattr)
{
struct btf *btf;
int err;
@ -9746,13 +9753,6 @@ static bool range_within(struct bpf_reg_state *old,
old->s32_max_value >= cur->s32_max_value;
}
/* Maximum number of register states that can exist at once */
#define ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
struct idpair {
u32 old;
u32 cur;
};
/* If in the old state two registers had the same id, then they need to have
* the same id in the new state as well. But that id could be different from
* the old state, so we need to track the mapping from old to new ids.
@ -9763,11 +9763,11 @@ struct idpair {
* So we look through our idmap to see if this old id has been seen before. If
* so, we require the new id to match; otherwise, we add the id pair to the map.
*/
static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
static bool check_ids(u32 old_id, u32 cur_id, struct bpf_id_pair *idmap)
{
unsigned int i;
for (i = 0; i < ID_MAP_SIZE; i++) {
for (i = 0; i < BPF_ID_MAP_SIZE; i++) {
if (!idmap[i].old) {
/* Reached an empty slot; haven't seen this id before */
idmap[i].old = old_id;
@ -9880,7 +9880,7 @@ static void clean_live_states(struct bpf_verifier_env *env, int insn,
/* Returns true if (rold safe implies rcur safe) */
static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
struct idpair *idmap)
struct bpf_id_pair *idmap)
{
bool equal;
@ -9998,7 +9998,7 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
static bool stacksafe(struct bpf_func_state *old,
struct bpf_func_state *cur,
struct idpair *idmap)
struct bpf_id_pair *idmap)
{
int i, spi;
@ -10095,32 +10095,23 @@ static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
* whereas register type in current state is meaningful, it means that
* the current state will reach 'bpf_exit' instruction safely
*/
static bool func_states_equal(struct bpf_func_state *old,
static bool func_states_equal(struct bpf_verifier_env *env, struct bpf_func_state *old,
struct bpf_func_state *cur)
{
struct idpair *idmap;
bool ret = false;
int i;
idmap = kcalloc(ID_MAP_SIZE, sizeof(struct idpair), GFP_KERNEL);
/* If we failed to allocate the idmap, just say it's not safe */
if (!idmap)
memset(env->idmap_scratch, 0, sizeof(env->idmap_scratch));
for (i = 0; i < MAX_BPF_REG; i++)
if (!regsafe(&old->regs[i], &cur->regs[i], env->idmap_scratch))
return false;
if (!stacksafe(old, cur, env->idmap_scratch))
return false;
for (i = 0; i < MAX_BPF_REG; i++) {
if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
goto out_free;
}
if (!stacksafe(old, cur, idmap))
goto out_free;
if (!refsafe(old, cur))
goto out_free;
ret = true;
out_free:
kfree(idmap);
return ret;
return false;
return true;
}
static bool states_equal(struct bpf_verifier_env *env,
@ -10147,7 +10138,7 @@ static bool states_equal(struct bpf_verifier_env *env,
for (i = 0; i <= old->curframe; i++) {
if (old->frame[i]->callsite != cur->frame[i]->callsite)
return false;
if (!func_states_equal(old->frame[i], cur->frame[i]))
if (!func_states_equal(env, old->frame[i], cur->frame[i]))
return false;
}
return true;
@ -11184,6 +11175,7 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
struct bpf_map *map;
struct fd f;
u64 addr;
u32 fd;
if (i == insn_cnt - 1 || insn[1].code != 0 ||
insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
@ -11213,16 +11205,38 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
/* In final convert_pseudo_ld_imm64() step, this is
* converted into regular 64-bit imm load insn.
*/
if ((insn[0].src_reg != BPF_PSEUDO_MAP_FD &&
insn[0].src_reg != BPF_PSEUDO_MAP_VALUE) ||
(insn[0].src_reg == BPF_PSEUDO_MAP_FD &&
insn[1].imm != 0)) {
verbose(env,
"unrecognized bpf_ld_imm64 insn\n");
switch (insn[0].src_reg) {
case BPF_PSEUDO_MAP_VALUE:
case BPF_PSEUDO_MAP_IDX_VALUE:
break;
case BPF_PSEUDO_MAP_FD:
case BPF_PSEUDO_MAP_IDX:
if (insn[1].imm == 0)
break;
fallthrough;
default:
verbose(env, "unrecognized bpf_ld_imm64 insn\n");
return -EINVAL;
}
f = fdget(insn[0].imm);
switch (insn[0].src_reg) {
case BPF_PSEUDO_MAP_IDX_VALUE:
case BPF_PSEUDO_MAP_IDX:
if (bpfptr_is_null(env->fd_array)) {
verbose(env, "fd_idx without fd_array is invalid\n");
return -EPROTO;
}
if (copy_from_bpfptr_offset(&fd, env->fd_array,
insn[0].imm * sizeof(fd),
sizeof(fd)))
return -EFAULT;
break;
default:
fd = insn[0].imm;
break;
}
f = fdget(fd);
map = __bpf_map_get(f);
if (IS_ERR(map)) {
verbose(env, "fd %d is not pointing to valid bpf_map\n",
@ -11237,7 +11251,8 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
}
aux = &env->insn_aux_data[i];
if (insn->src_reg == BPF_PSEUDO_MAP_FD) {
if (insn[0].src_reg == BPF_PSEUDO_MAP_FD ||
insn[0].src_reg == BPF_PSEUDO_MAP_IDX) {
addr = (unsigned long)map;
} else {
u32 off = insn[1].imm;
@ -13210,6 +13225,14 @@ static int check_attach_btf_id(struct bpf_verifier_env *env)
int ret;
u64 key;
if (prog->type == BPF_PROG_TYPE_SYSCALL) {
if (prog->aux->sleepable)
/* attach_btf_id checked to be zero already */
return 0;
verbose(env, "Syscall programs can only be sleepable\n");
return -EINVAL;
}
if (prog->aux->sleepable && prog->type != BPF_PROG_TYPE_TRACING &&
prog->type != BPF_PROG_TYPE_LSM) {
verbose(env, "Only fentry/fexit/fmod_ret and lsm programs can be sleepable\n");
@ -13281,8 +13304,7 @@ struct btf *bpf_get_btf_vmlinux(void)
return btf_vmlinux;
}
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
union bpf_attr __user *uattr)
int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr)
{
u64 start_time = ktime_get_ns();
struct bpf_verifier_env *env;
@ -13312,6 +13334,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr,
env->insn_aux_data[i].orig_idx = i;
env->prog = *prog;
env->ops = bpf_verifier_ops[env->prog->type];
env->fd_array = make_bpfptr(attr->fd_array, uattr.is_kernel);
is_priv = bpf_capable();
bpf_get_btf_vmlinux();


@ -409,7 +409,7 @@ static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
return ERR_PTR(-ENOMEM);
if (data_in) {
err = bpf_check_uarg_tail_zero(data_in, max_size, size);
err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
if (err) {
kfree(data);
return ERR_PTR(err);
@ -918,3 +918,46 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
kfree(user_ctx);
return ret;
}
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr)
{
void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
__u32 ctx_size_in = kattr->test.ctx_size_in;
void *ctx = NULL;
u32 retval;
int err = 0;
/* doesn't support data_in/out, ctx_out, duration, or repeat or flags */
if (kattr->test.data_in || kattr->test.data_out ||
kattr->test.ctx_out || kattr->test.duration ||
kattr->test.repeat || kattr->test.flags)
return -EINVAL;
if (ctx_size_in < prog->aux->max_ctx_offset ||
ctx_size_in > U16_MAX)
return -EINVAL;
if (ctx_size_in) {
ctx = kzalloc(ctx_size_in, GFP_USER);
if (!ctx)
return -ENOMEM;
if (copy_from_user(ctx, ctx_in, ctx_size_in)) {
err = -EFAULT;
goto out;
}
}
retval = bpf_prog_run_pin_on_cpu(prog, ctx);
if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
err = -EFAULT;
goto out;
}
if (ctx_size_in)
if (copy_to_user(ctx_in, ctx, ctx_size_in))
err = -EFAULT;
out:
kfree(ctx);
return err;
}


@ -3235,7 +3235,7 @@ static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
return ret;
}
static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
static int bpf_skb_proto_4_to_6(struct sk_buff *skb, u64 flags)
{
const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
u32 off = skb_mac_header_len(skb);
@ -3264,7 +3264,9 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
}
/* Due to IPv6 header, MSS needs to be downgraded. */
skb_decrease_gso_size(shinfo, len_diff);
if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
skb_decrease_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */
shinfo->gso_type |= SKB_GSO_DODGY;
shinfo->gso_segs = 0;
@ -3276,7 +3278,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
return 0;
}
static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
static int bpf_skb_proto_6_to_4(struct sk_buff *skb, u64 flags)
{
const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
u32 off = skb_mac_header_len(skb);
@ -3305,7 +3307,9 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
}
/* Due to IPv4 header, MSS can be upgraded. */
skb_increase_gso_size(shinfo, len_diff);
if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
skb_increase_gso_size(shinfo, len_diff);
/* Header must be checked, and gso_segs recomputed. */
shinfo->gso_type |= SKB_GSO_DODGY;
shinfo->gso_segs = 0;
@ -3317,17 +3321,17 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
return 0;
}
static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto, u64 flags)
{
__be16 from_proto = skb->protocol;
if (from_proto == htons(ETH_P_IP) &&
to_proto == htons(ETH_P_IPV6))
return bpf_skb_proto_4_to_6(skb);
return bpf_skb_proto_4_to_6(skb, flags);
if (from_proto == htons(ETH_P_IPV6) &&
to_proto == htons(ETH_P_IP))
return bpf_skb_proto_6_to_4(skb);
return bpf_skb_proto_6_to_4(skb, flags);
return -ENOTSUPP;
}
@ -3337,7 +3341,7 @@ BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
{
int ret;
if (unlikely(flags))
if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO)))
return -EINVAL;
/* General idea is that this helper does the basic groundwork
@ -3357,7 +3361,7 @@ BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
* that. For offloads, we mark packet as dodgy, so that headers
* need to be verified first.
*/
ret = bpf_skb_proto_xlat(skb, proto);
ret = bpf_skb_proto_xlat(skb, proto, flags);
bpf_compute_data_pointers(skb);
return ret;
}


@ -399,8 +399,7 @@ int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
}
EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, int flags,
long timeo, int *err)
int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo)
{
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int ret = 0;


@ -184,11 +184,11 @@ static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
msg_bytes_ready:
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
if (!copied) {
int data, err = 0;
long timeo;
int data;
timeo = sock_rcvtimeo(sk, nonblock);
data = sk_msg_wait_data(sk, psock, flags, timeo, &err);
data = sk_msg_wait_data(sk, psock, timeo);
if (data) {
if (!sk_psock_queue_empty(psock))
goto msg_bytes_ready;
@ -196,14 +196,9 @@ static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
sk_psock_put(sk, psock);
return tcp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
}
if (err) {
ret = err;
goto out;
}
copied = -EAGAIN;
}
ret = copied;
out:
release_sock(sk);
sk_psock_put(sk, psock);
return ret;


@ -43,21 +43,17 @@ static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
msg_bytes_ready:
copied = sk_msg_recvmsg(sk, psock, msg, len, flags);
if (!copied) {
int data, err = 0;
long timeo;
int data;
timeo = sock_rcvtimeo(sk, nonblock);
data = sk_msg_wait_data(sk, psock, flags, timeo, &err);
data = sk_msg_wait_data(sk, psock, timeo);
if (data) {
if (!sk_psock_queue_empty(psock))
goto msg_bytes_ready;
ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
goto out;
}
if (err) {
ret = err;
goto out;
}
copied = -EAGAIN;
}
ret = copied;


@ -396,7 +396,7 @@ int main(int argc, char **argv)
* on different systems with different compilers. The right way is
* to parse the ELF file. We took a shortcut here.
*/
uprobe_file_offset = (__u64)main - (__u64)&__executable_start;
uprobe_file_offset = (unsigned long)main - (unsigned long)&__executable_start;
CHECK_AND_RET(test_nondebug_fs_probe("uprobe", (char *)argv[0],
uprobe_file_offset, 0x0, false,
BPF_FD_TYPE_UPROBE,


@ -136,7 +136,7 @@ endif
BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool
BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o)
BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o) $(OUTPUT)disasm.o
OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o
VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \


@ -18,6 +18,7 @@
#include <sys/stat.h>
#include <sys/mman.h>
#include <bpf/btf.h>
#include <bpf/bpf_gen_internal.h>
#include "json_writer.h"
#include "main.h"
@ -106,8 +107,10 @@ static int codegen_datasec_def(struct bpf_object *obj,
if (strcmp(sec_name, ".data") == 0) {
sec_ident = "data";
strip_mods = true;
} else if (strcmp(sec_name, ".bss") == 0) {
sec_ident = "bss";
strip_mods = true;
} else if (strcmp(sec_name, ".rodata") == 0) {
sec_ident = "rodata";
strip_mods = true;
@ -129,6 +132,10 @@ static int codegen_datasec_def(struct bpf_object *obj,
int need_off = sec_var->offset, align_off, align;
__u32 var_type_id = var->type;
/* static variables are not exposed through BPF skeleton */
if (btf_var(var)->linkage == BTF_VAR_STATIC)
continue;
if (off > need_off) {
p_err("Something is wrong for %s's variable #%d: need offset %d, already at %d.\n",
sec_name, i, need_off, off);
@ -268,6 +275,327 @@ static void codegen(const char *template, ...)
free(s);
}
static void print_hex(const char *data, int data_sz)
{
int i, len;
for (i = 0, len = 0; i < data_sz; i++) {
int w = data[i] ? 4 : 2;
len += w;
if (len > 78) {
printf("\\\n");
len = w;
}
if (!data[i])
printf("\\0");
else
printf("\\x%02x", (unsigned char)data[i]);
}
}
static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
long page_sz = sysconf(_SC_PAGE_SIZE);
size_t map_sz;
map_sz = (size_t)roundup(bpf_map__value_size(map), 8) * bpf_map__max_entries(map);
map_sz = roundup(map_sz, page_sz);
return map_sz;
}
static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
struct bpf_program *prog;
bpf_object__for_each_program(prog, obj) {
const char *tp_name;
codegen("\
\n\
\n\
static inline int \n\
%1$s__%2$s__attach(struct %1$s *skel) \n\
{ \n\
int prog_fd = skel->progs.%2$s.prog_fd; \n\
", obj_name, bpf_program__name(prog));
switch (bpf_program__get_type(prog)) {
case BPF_PROG_TYPE_RAW_TRACEPOINT:
tp_name = strchr(bpf_program__section_name(prog), '/') + 1;
printf("\tint fd = bpf_raw_tracepoint_open(\"%s\", prog_fd);\n", tp_name);
break;
case BPF_PROG_TYPE_TRACING:
printf("\tint fd = bpf_raw_tracepoint_open(NULL, prog_fd);\n");
break;
default:
printf("\tint fd = ((void)prog_fd, 0); /* auto-attach not supported */\n");
break;
}
codegen("\
\n\
\n\
if (fd > 0) \n\
skel->links.%1$s_fd = fd; \n\
return fd; \n\
} \n\
", bpf_program__name(prog));
}
codegen("\
\n\
\n\
static inline int \n\
%1$s__attach(struct %1$s *skel) \n\
{ \n\
int ret = 0; \n\
\n\
", obj_name);
bpf_object__for_each_program(prog, obj) {
codegen("\
\n\
ret = ret < 0 ? ret : %1$s__%2$s__attach(skel); \n\
", obj_name, bpf_program__name(prog));
}
codegen("\
\n\
return ret < 0 ? ret : 0; \n\
} \n\
\n\
static inline void \n\
%1$s__detach(struct %1$s *skel) \n\
{ \n\
", obj_name);
bpf_object__for_each_program(prog, obj) {
codegen("\
\n\
skel_closenz(skel->links.%1$s_fd); \n\
", bpf_program__name(prog));
}
codegen("\
\n\
} \n\
");
}
static void codegen_destroy(struct bpf_object *obj, const char *obj_name)
{
struct bpf_program *prog;
struct bpf_map *map;
codegen("\
\n\
static void \n\
%1$s__destroy(struct %1$s *skel) \n\
{ \n\
if (!skel) \n\
return; \n\
%1$s__detach(skel); \n\
",
obj_name);
bpf_object__for_each_program(prog, obj) {
codegen("\
\n\
skel_closenz(skel->progs.%1$s.prog_fd); \n\
", bpf_program__name(prog));
}
bpf_object__for_each_map(map, obj) {
const char * ident;
ident = get_map_ident(map);
if (!ident)
continue;
if (bpf_map__is_internal(map) &&
(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
printf("\tmunmap(skel->%1$s, %2$zd);\n",
ident, bpf_map_mmap_sz(map));
codegen("\
\n\
skel_closenz(skel->maps.%1$s.map_fd); \n\
", ident);
}
codegen("\
\n\
free(skel); \n\
} \n\
",
obj_name);
}
static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *header_guard)
{
struct bpf_object_load_attr load_attr = {};
DECLARE_LIBBPF_OPTS(gen_loader_opts, opts);
struct bpf_map *map;
int err = 0;
err = bpf_object__gen_loader(obj, &opts);
if (err)
return err;
load_attr.obj = obj;
if (verifier_logs)
/* log_level1 + log_level2 + stats, but not stable UAPI */
load_attr.log_level = 1 + 2 + 4;
err = bpf_object__load_xattr(&load_attr);
if (err) {
p_err("failed to load object file");
goto out;
}
/* If there was no error during load then gen_loader_opts
* are populated with the loader program.
*/
/* finish generating 'struct skel' */
codegen("\
\n\
}; \n\
", obj_name);
codegen_attach_detach(obj, obj_name);
codegen_destroy(obj, obj_name);
codegen("\
\n\
static inline struct %1$s * \n\
%1$s__open(void) \n\
{ \n\
struct %1$s *skel; \n\
\n\
skel = calloc(sizeof(*skel), 1); \n\
if (!skel) \n\
goto cleanup; \n\
skel->ctx.sz = (void *)&skel->links - (void *)skel; \n\
",
obj_name, opts.data_sz);
bpf_object__for_each_map(map, obj) {
const char *ident;
const void *mmap_data = NULL;
size_t mmap_size = 0;
ident = get_map_ident(map);
if (!ident)
continue;
if (!bpf_map__is_internal(map) ||
!(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
continue;
codegen("\
\n\
skel->%1$s = \n\
mmap(NULL, %2$zd, PROT_READ | PROT_WRITE,\n\
MAP_SHARED | MAP_ANONYMOUS, -1, 0); \n\
if (skel->%1$s == (void *) -1) \n\
goto cleanup; \n\
memcpy(skel->%1$s, (void *)\"\\ \n\
", ident, bpf_map_mmap_sz(map));
mmap_data = bpf_map__initial_value(map, &mmap_size);
print_hex(mmap_data, mmap_size);
printf("\", %2$zd);\n"
"\tskel->maps.%1$s.initial_value = (__u64)(long)skel->%1$s;\n",
ident, mmap_size);
}
codegen("\
\n\
return skel; \n\
cleanup: \n\
%1$s__destroy(skel); \n\
return NULL; \n\
} \n\
\n\
static inline int \n\
%1$s__load(struct %1$s *skel) \n\
{ \n\
struct bpf_load_and_run_opts opts = {}; \n\
int err; \n\
\n\
opts.ctx = (struct bpf_loader_ctx *)skel; \n\
opts.data_sz = %2$d; \n\
opts.data = (void *)\"\\ \n\
",
obj_name, opts.data_sz);
print_hex(opts.data, opts.data_sz);
codegen("\
\n\
\"; \n\
");
codegen("\
\n\
opts.insns_sz = %d; \n\
opts.insns = (void *)\"\\ \n\
",
opts.insns_sz);
print_hex(opts.insns, opts.insns_sz);
codegen("\
\n\
\"; \n\
err = bpf_load_and_run(&opts); \n\
if (err < 0) \n\
return err; \n\
", obj_name);
bpf_object__for_each_map(map, obj) {
const char *ident, *mmap_flags;
ident = get_map_ident(map);
if (!ident)
continue;
if (!bpf_map__is_internal(map) ||
!(bpf_map__def(map)->map_flags & BPF_F_MMAPABLE))
continue;
if (bpf_map__def(map)->map_flags & BPF_F_RDONLY_PROG)
mmap_flags = "PROT_READ";
else
mmap_flags = "PROT_READ | PROT_WRITE";
printf("\tskel->%1$s =\n"
"\t\tmmap(skel->%1$s, %2$zd, %3$s, MAP_SHARED | MAP_FIXED,\n"
"\t\t\tskel->maps.%1$s.map_fd, 0);\n",
ident, bpf_map_mmap_sz(map), mmap_flags);
}
codegen("\
\n\
return 0; \n\
} \n\
\n\
static inline struct %1$s * \n\
%1$s__open_and_load(void) \n\
{ \n\
struct %1$s *skel; \n\
\n\
skel = %1$s__open(); \n\
if (!skel) \n\
return NULL; \n\
if (%1$s__load(skel)) { \n\
%1$s__destroy(skel); \n\
return NULL; \n\
} \n\
return skel; \n\
} \n\
", obj_name);
codegen("\
\n\
\n\
#endif /* %s */ \n\
",
header_guard);
err = 0;
out:
return err;
}
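/* Sketch of how a generated light skeleton is intended to be used; the
 * object name "example" is only an assumed placeholder:
 *
 *	struct example *skel = example__open_and_load();
 *
 *	if (!skel)
 *		return -1;
 *	err = example__attach(skel);
 *	...
 *	example__destroy(skel);
 */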
static int do_skeleton(int argc, char **argv)
{
char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")];
@ -277,7 +605,7 @@ static int do_skeleton(int argc, char **argv)
struct bpf_object *obj = NULL;
const char *file, *ident;
struct bpf_program *prog;
int fd, len, err = -1;
int fd, err = -1;
struct bpf_map *map;
struct btf *btf;
struct stat st;
@ -359,7 +687,25 @@ static int do_skeleton(int argc, char **argv)
}
get_header_guard(header_guard, obj_name);
codegen("\
if (use_loader) {
codegen("\
\n\
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
/* THIS FILE IS AUTOGENERATED! */ \n\
#ifndef %2$s \n\
#define %2$s \n\
\n\
#include <stdlib.h> \n\
#include <bpf/bpf.h> \n\
#include <bpf/skel_internal.h> \n\
\n\
struct %1$s { \n\
struct bpf_loader_ctx ctx; \n\
",
obj_name, header_guard
);
} else {
codegen("\
\n\
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ \n\
\n\
@ -375,7 +721,8 @@ static int do_skeleton(int argc, char **argv)
struct bpf_object *obj; \n\
",
obj_name, header_guard
);
);
}
if (map_cnt) {
printf("\tstruct {\n");
@ -383,7 +730,10 @@ static int do_skeleton(int argc, char **argv)
ident = get_map_ident(map);
if (!ident)
continue;
printf("\t\tstruct bpf_map *%s;\n", ident);
if (use_loader)
printf("\t\tstruct bpf_map_desc %s;\n", ident);
else
printf("\t\tstruct bpf_map *%s;\n", ident);
}
printf("\t} maps;\n");
}
@ -391,14 +741,22 @@ static int do_skeleton(int argc, char **argv)
if (prog_cnt) {
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
printf("\t\tstruct bpf_program *%s;\n",
bpf_program__name(prog));
if (use_loader)
printf("\t\tstruct bpf_prog_desc %s;\n",
bpf_program__name(prog));
else
printf("\t\tstruct bpf_program *%s;\n",
bpf_program__name(prog));
}
printf("\t} progs;\n");
printf("\tstruct {\n");
bpf_object__for_each_program(prog, obj) {
printf("\t\tstruct bpf_link *%s;\n",
bpf_program__name(prog));
if (use_loader)
printf("\t\tint %s_fd;\n",
bpf_program__name(prog));
else
printf("\t\tstruct bpf_link *%s;\n",
bpf_program__name(prog));
}
printf("\t} links;\n");
}
@ -409,6 +767,10 @@ static int do_skeleton(int argc, char **argv)
if (err)
goto out;
}
if (use_loader) {
err = gen_trace(obj, obj_name, header_guard);
goto out;
}
codegen("\
\n\
@ -578,19 +940,7 @@ static int do_skeleton(int argc, char **argv)
file_sz);
/* embed contents of BPF object file */
for (i = 0, len = 0; i < file_sz; i++) {
int w = obj_data[i] ? 4 : 2;
len += w;
if (len > 78) {
printf("\\\n");
len = w;
}
if (!obj_data[i])
printf("\\0");
else
printf("\\x%02x", (unsigned char)obj_data[i]);
}
print_hex(obj_data, file_sz);
codegen("\
\n\
@ -636,7 +986,7 @@ static int do_object(int argc, char **argv)
while (argc) {
file = GET_ARG();
err = bpf_linker__add_file(linker, file);
err = bpf_linker__add_file(linker, file, NULL);
if (err) {
p_err("failed to link '%s': %s (%d)", file, strerror(err), err);
goto out;

View file

@ -29,6 +29,7 @@ bool show_pinned;
bool block_mount;
bool verifier_logs;
bool relaxed_maps;
bool use_loader;
struct btf *base_btf;
struct pinned_obj_table prog_table;
struct pinned_obj_table map_table;
@ -392,6 +393,7 @@ int main(int argc, char **argv)
{ "mapcompat", no_argument, NULL, 'm' },
{ "nomount", no_argument, NULL, 'n' },
{ "debug", no_argument, NULL, 'd' },
{ "use-loader", no_argument, NULL, 'L' },
{ "base-btf", required_argument, NULL, 'B' },
{ 0 }
};
@ -409,7 +411,7 @@ int main(int argc, char **argv)
hash_init(link_table.table);
opterr = 0;
while ((opt = getopt_long(argc, argv, "VhpjfmndB:",
while ((opt = getopt_long(argc, argv, "VhpjfLmndB:",
options, NULL)) >= 0) {
switch (opt) {
case 'V':
@ -452,6 +454,9 @@ int main(int argc, char **argv)
return -1;
}
break;
case 'L':
use_loader = true;
break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)

View file

@ -90,6 +90,7 @@ extern bool show_pids;
extern bool block_mount;
extern bool verifier_logs;
extern bool relaxed_maps;
extern bool use_loader;
extern struct btf *base_btf;
extern struct pinned_obj_table prog_table;
extern struct pinned_obj_table map_table;

View file

@ -16,6 +16,7 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <dirent.h>
#include <linux/err.h>
#include <linux/perf_event.h>
@ -24,6 +25,8 @@
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <bpf/bpf_gen_internal.h>
#include <bpf/skel_internal.h>
#include "cfg.h"
#include "main.h"
@ -1499,7 +1502,7 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
set_max_rlimit();
obj = bpf_object__open_file(file, &open_opts);
if (IS_ERR_OR_NULL(obj)) {
if (libbpf_get_error(obj)) {
p_err("failed to open object file");
goto err_free_reuse_maps;
}
@ -1645,8 +1648,110 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
return -1;
}
static int count_open_fds(void)
{
DIR *dp = opendir("/proc/self/fd");
struct dirent *de;
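/* start at -3 to account for ".", ".." and the FD held open by opendir() itself */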
int cnt = -3;
if (!dp)
return -1;
while ((de = readdir(dp)))
cnt++;
closedir(dp);
return cnt;
}
static int try_loader(struct gen_loader_opts *gen)
{
struct bpf_load_and_run_opts opts = {};
struct bpf_loader_ctx *ctx;
int ctx_sz = sizeof(*ctx) + 64 * max(sizeof(struct bpf_map_desc),
sizeof(struct bpf_prog_desc));
int log_buf_sz = (1u << 24) - 1;
int err, fds_before, fd_delta;
char *log_buf;
ctx = alloca(ctx_sz);
memset(ctx, 0, ctx_sz);
ctx->sz = ctx_sz;
ctx->log_level = 1;
ctx->log_size = log_buf_sz;
log_buf = malloc(log_buf_sz);
if (!log_buf)
return -ENOMEM;
ctx->log_buf = (long) log_buf;
opts.ctx = ctx;
opts.data = gen->data;
opts.data_sz = gen->data_sz;
opts.insns = gen->insns;
opts.insns_sz = gen->insns_sz;
fds_before = count_open_fds();
err = bpf_load_and_run(&opts);
fd_delta = count_open_fds() - fds_before;
if (err < 0) {
fprintf(stderr, "err %d\n%s\n%s", err, opts.errstr, log_buf);
if (fd_delta)
fprintf(stderr, "loader prog leaked %d FDs\n",
fd_delta);
}
free(log_buf);
return err;
}
static int do_loader(int argc, char **argv)
{
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
struct bpf_object_load_attr load_attr = {};
struct bpf_object *obj;
const char *file;
int err = 0;
if (!REQ_ARGS(1))
return -1;
file = GET_ARG();
obj = bpf_object__open_file(file, &open_opts);
if (libbpf_get_error(obj)) {
p_err("failed to open object file");
goto err_close_obj;
}
err = bpf_object__gen_loader(obj, &gen);
if (err)
goto err_close_obj;
load_attr.obj = obj;
if (verifier_logs)
/* log_level1 + log_level2 + stats, but not stable UAPI */
load_attr.log_level = 1 + 2 + 4;
err = bpf_object__load_xattr(&load_attr);
if (err) {
p_err("failed to load object file");
goto err_close_obj;
}
if (verifier_logs) {
struct dump_data dd = {};
kernel_syms_load(&dd);
dump_xlated_plain(&dd, (void *)gen.insns, gen.insns_sz, false, false);
kernel_syms_destroy(&dd);
}
err = try_loader(&gen);
err_close_obj:
bpf_object__close(obj);
return err;
}
static int do_load(int argc, char **argv)
{
if (use_loader)
return do_loader(argc, argv);
return load_with_options(argc, argv, true);
}
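/* When bpftool runs with the new -L/--use-loader option (wired up in main.c
 * further below), do_load() takes the do_loader() path above: the object is
 * converted into a loader program and executed via bpf_load_and_run() instead
 * of being loaded directly through load_with_options().
 */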

View file

@ -196,6 +196,9 @@ static const char *print_imm(void *private_data,
else if (insn->src_reg == BPF_PSEUDO_MAP_VALUE)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"map[id:%u][0]+%u", insn->imm, (insn + 1)->imm);
else if (insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"map[idx:%u]+%u", insn->imm, (insn + 1)->imm);
else if (insn->src_reg == BPF_PSEUDO_FUNC)
snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
"subprog[%+d]", insn->imm);

View file

@ -837,6 +837,7 @@ enum bpf_cmd {
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
BPF_PROG_TEST_RUN,
BPF_PROG_RUN = BPF_PROG_TEST_RUN,
BPF_PROG_GET_NEXT_ID,
BPF_MAP_GET_NEXT_ID,
BPF_PROG_GET_FD_BY_ID,
@ -937,6 +938,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_EXT,
BPF_PROG_TYPE_LSM,
BPF_PROG_TYPE_SK_LOOKUP,
BPF_PROG_TYPE_SYSCALL, /* a program that can execute syscalls */
};
enum bpf_attach_type {
@ -1097,8 +1099,8 @@ enum bpf_link_type {
/* When BPF ldimm64's insn[0].src_reg != 0 then this can have
* the following extensions:
*
* insn[0].src_reg: BPF_PSEUDO_MAP_FD
* insn[0].imm: map fd
* insn[0].src_reg: BPF_PSEUDO_MAP_[FD|IDX]
* insn[0].imm: map fd or fd_idx
* insn[1].imm: 0
* insn[0].off: 0
* insn[1].off: 0
@ -1106,15 +1108,19 @@ enum bpf_link_type {
* verifier type: CONST_PTR_TO_MAP
*/
#define BPF_PSEUDO_MAP_FD 1
/* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE
* insn[0].imm: map fd
#define BPF_PSEUDO_MAP_IDX 5
/* insn[0].src_reg: BPF_PSEUDO_MAP_[IDX_]VALUE
* insn[0].imm: map fd or fd_idx
* insn[1].imm: offset into value
* insn[0].off: 0
* insn[1].off: 0
* ldimm64 rewrite: address of map[0]+offset
* verifier type: PTR_TO_MAP_VALUE
*/
#define BPF_PSEUDO_MAP_VALUE 2
#define BPF_PSEUDO_MAP_VALUE 2
#define BPF_PSEUDO_MAP_IDX_VALUE 6
/* insn[0].src_reg: BPF_PSEUDO_BTF_ID
* insn[0].imm: kernel btd id of VAR
* insn[1].imm: 0
@ -1314,6 +1320,8 @@ union bpf_attr {
/* or valid module BTF object fd or 0 to attach to vmlinux */
__u32 attach_btf_obj_fd;
};
__u32 :32; /* pad */
__aligned_u64 fd_array; /* array of FDs */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@ -4735,6 +4743,24 @@ union bpf_attr {
* be zero-terminated except when **str_size** is 0.
*
* Or **-EBUSY** if the per-CPU memory copy buffer is busy.
*
* long bpf_sys_bpf(u32 cmd, void *attr, u32 attr_size)
* Description
* Execute bpf syscall with given arguments.
* Return
* A syscall result.
*
* long bpf_btf_find_by_name_kind(char *name, int name_sz, u32 kind, int flags)
* Description
* Find BTF type with given name and kind in vmlinux BTF or in module's BTFs.
* Return
* Returns btf_id and btf_obj_fd in lower and upper 32 bits.
*
* long bpf_sys_close(u32 fd)
* Description
* Execute close syscall for given FD.
* Return
* A syscall result.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@ -4903,6 +4929,9 @@ union bpf_attr {
FN(check_mtu), \
FN(for_each_map_elem), \
FN(snprintf), \
FN(sys_bpf), \
FN(btf_find_by_name_kind), \
FN(sys_close), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper

View file

@ -1,3 +1,3 @@
libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
btf_dump.o ringbuf.o strset.o linker.o
btf_dump.o ringbuf.o strset.o linker.o gen_loader.o

View file

@ -0,0 +1,41 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2021 Facebook */
#ifndef __BPF_GEN_INTERNAL_H
#define __BPF_GEN_INTERNAL_H
struct ksym_relo_desc {
const char *name;
int kind;
int insn_idx;
};
struct bpf_gen {
struct gen_loader_opts *opts;
void *data_start;
void *data_cur;
void *insn_start;
void *insn_cur;
ssize_t cleanup_label;
__u32 nr_progs;
__u32 nr_maps;
int log_level;
int error;
struct ksym_relo_desc *relos;
int relo_cnt;
char attach_target[128];
int attach_kind;
};
void bpf_gen__init(struct bpf_gen *gen, int log_level);
int bpf_gen__finish(struct bpf_gen *gen);
void bpf_gen__free(struct bpf_gen *gen);
void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_attr *map_attr, int map_idx);
struct bpf_prog_load_params;
void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx);
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind, int insn_idx);
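/* Rough call sequence, as used by libbpf later in this patch: bpf_gen__init()
 * once per object, then bpf_gen__load_btf(), bpf_gen__map_create() /
 * bpf_gen__map_update_elem() / bpf_gen__map_freeze() per map and
 * bpf_gen__prog_load() per program, and finally bpf_gen__finish(), which
 * hands the generated insns/data back through gen_loader_opts.
 */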
#endif

tools/lib/bpf/gen_loader.c (new file, 729 lines)
View file

@ -0,0 +1,729 @@
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <linux/filter.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
#define MAX_USED_MAPS 64
#define MAX_USED_PROGS 32
/* The following structure describes the stack layout of the loader program.
* In addition R6 contains the pointer to context.
* R7 contains the result of the last sys_bpf command (typically error or FD).
* R9 contains the result of the last sys_close command.
*
* Naming convention:
* ctx - bpf program context
* stack - bpf program stack
* blob - bpf_attr-s, strings, insns, map data.
* All the bytes that loader prog will use for read/write.
*/
struct loader_stack {
__u32 btf_fd;
__u32 map_fd[MAX_USED_MAPS];
__u32 prog_fd[MAX_USED_PROGS];
__u32 inner_map_fd;
};
#define stack_off(field) \
(__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))
#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))
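/* For example, stack_off(map_fd[1]) is the (negative, frame-pointer relative)
 * stack offset of the second map FD slot, and attr_field(prog_load_attr, insns)
 * is the blob offset of the insns member within that union bpf_attr copy.
 */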
static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
size_t off = gen->insn_cur - gen->insn_start;
void *insn_start;
if (gen->error)
return gen->error;
if (size > INT32_MAX || off + size > INT32_MAX) {
gen->error = -ERANGE;
return -ERANGE;
}
insn_start = realloc(gen->insn_start, off + size);
if (!insn_start) {
gen->error = -ENOMEM;
free(gen->insn_start);
gen->insn_start = NULL;
return -ENOMEM;
}
gen->insn_start = insn_start;
gen->insn_cur = insn_start + off;
return 0;
}
static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
{
size_t off = gen->data_cur - gen->data_start;
void *data_start;
if (gen->error)
return gen->error;
if (size > INT32_MAX || off + size > INT32_MAX) {
gen->error = -ERANGE;
return -ERANGE;
}
data_start = realloc(gen->data_start, off + size);
if (!data_start) {
gen->error = -ENOMEM;
free(gen->data_start);
gen->data_start = NULL;
return -ENOMEM;
}
gen->data_start = data_start;
gen->data_cur = data_start + off;
return 0;
}
static void emit(struct bpf_gen *gen, struct bpf_insn insn)
{
if (realloc_insn_buf(gen, sizeof(insn)))
return;
memcpy(gen->insn_cur, &insn, sizeof(insn));
gen->insn_cur += sizeof(insn);
}
static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
{
emit(gen, insn1);
emit(gen, insn2);
}
void bpf_gen__init(struct bpf_gen *gen, int log_level)
{
size_t stack_sz = sizeof(struct loader_stack);
int i;
gen->log_level = log_level;
/* save ctx pointer into R6 */
emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));
/* bzero stack */
emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));
/* jump over cleanup code */
emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
/* size of cleanup code below */
(stack_sz / 4) * 3 + 2));
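/* the count above is 3 insns (LDX, JMP, CALL) per 4-byte FD slot on the
 * stack, plus the trailing MOV + EXIT emitted below
 */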
/* remember the label where all error branches will jump to */
gen->cleanup_label = gen->insn_cur - gen->insn_start;
/* emit cleanup code: close all temp FDs */
for (i = 0; i < stack_sz; i += 4) {
emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
}
/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
emit(gen, BPF_EXIT_INSN());
}
static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
void *prev;
if (realloc_data_buf(gen, size))
return 0;
prev = gen->data_cur;
memcpy(gen->data_cur, data, size);
gen->data_cur += size;
return prev - gen->data_start;
}
static int insn_bytes_to_bpf_size(__u32 sz)
{
switch (sz) {
case 8: return BPF_DW;
case 4: return BPF_W;
case 2: return BPF_H;
case 1: return BPF_B;
default: return -1;
}
}
/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
static void emit_rel_store(struct bpf_gen *gen, int off, int data)
{
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, data));
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, off));
emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}
/* *(u64 *)(blob + off) = (u64)(void *)(%sp + stack_off) */
static void emit_rel_store_sp(struct bpf_gen *gen, int off, int stack_off)
{
emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_10));
emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, stack_off));
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, off));
emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}
static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
bool check_non_zero)
{
emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
if (check_non_zero)
/* If the value in ctx is zero, don't update the blob.
 * For example: when ctx->map.max_entries == 0, keep the default max_entries from bpf.c.
 */
emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, off));
emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}
static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
{
emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, off));
emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}
static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
{
emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}
static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
{
emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, attr));
emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
/* remember the result in R7 */
emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
}
static bool is_simm16(__s64 value)
{
return value == (__s64)(__s16)value;
}
static void emit_check_err(struct bpf_gen *gen)
{
__s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;
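/* distance back to cleanup_label in insns (8 bytes each); the extra -1
 * accounts for jump offsets being relative to the following insn
 */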
/* R7 contains result of last sys_bpf command.
* if (R7 < 0) goto cleanup;
*/
if (is_simm16(off)) {
emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
} else {
gen->error = -ERANGE;
emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
}
}
/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
const char *fmt, va_list args)
{
char buf[1024];
int addr, len, ret;
if (!gen->log_level)
return;
ret = vsnprintf(buf, sizeof(buf), fmt, args);
if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
/* Special case to accommodate the common debug_ret() pattern:
 * avoids having to pass BPF_REG_7 and append " r=%%d" to every
 * format string explicitly.
 */
strcat(buf, " r=%d");
len = strlen(buf) + 1;
addr = add_data(gen, buf, len);
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, addr));
emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
if (reg1 >= 0)
emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
if (reg2 >= 0)
emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
}
static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
emit_debug(gen, reg1, reg2, fmt, args);
va_end(args);
}
static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
emit_debug(gen, BPF_REG_7, -1, fmt, args);
va_end(args);
}
static void __emit_sys_close(struct bpf_gen *gen)
{
emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
/* 2 is the number of the following insns
* 6 is additional insns in debug_regs
*/
2 + (gen->log_level ? 6 : 0)));
emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
}
static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
{
emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
__emit_sys_close(gen);
}
static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
{
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, blob_off));
emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
__emit_sys_close(gen);
}
int bpf_gen__finish(struct bpf_gen *gen)
{
int i;
emit_sys_close_stack(gen, stack_off(btf_fd));
for (i = 0; i < gen->nr_progs; i++)
move_stack2ctx(gen,
sizeof(struct bpf_loader_ctx) +
sizeof(struct bpf_map_desc) * gen->nr_maps +
sizeof(struct bpf_prog_desc) * i +
offsetof(struct bpf_prog_desc, prog_fd), 4,
stack_off(prog_fd[i]));
for (i = 0; i < gen->nr_maps; i++)
move_stack2ctx(gen,
sizeof(struct bpf_loader_ctx) +
sizeof(struct bpf_map_desc) * i +
offsetof(struct bpf_map_desc, map_fd), 4,
stack_off(map_fd[i]));
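/* the loops above copy the final prog/map FDs from the loader stack into the
 * bpf_prog_desc/bpf_map_desc slots of the user-provided context
 */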
emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
emit(gen, BPF_EXIT_INSN());
pr_debug("gen: finish %d\n", gen->error);
if (!gen->error) {
struct gen_loader_opts *opts = gen->opts;
opts->insns = gen->insn_start;
opts->insns_sz = gen->insn_cur - gen->insn_start;
opts->data = gen->data_start;
opts->data_sz = gen->data_cur - gen->data_start;
}
return gen->error;
}
void bpf_gen__free(struct bpf_gen *gen)
{
if (!gen)
return;
free(gen->data_start);
free(gen->insn_start);
free(gen);
}
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
__u32 btf_raw_size)
{
int attr_size = offsetofend(union bpf_attr, btf_log_level);
int btf_data, btf_load_attr;
union bpf_attr attr;
memset(&attr, 0, attr_size);
pr_debug("gen: load_btf: size %d\n", btf_raw_size);
btf_data = add_data(gen, btf_raw_data, btf_raw_size);
attr.btf_size = btf_raw_size;
btf_load_attr = add_data(gen, &attr, attr_size);
/* populate union bpf_attr with user provided log details */
move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
offsetof(struct bpf_loader_ctx, log_level), false);
move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
offsetof(struct bpf_loader_ctx, log_size), false);
move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
offsetof(struct bpf_loader_ctx, log_buf), false);
/* populate union bpf_attr with a pointer to the BTF data */
emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
/* emit BTF_LOAD command */
emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
debug_ret(gen, "btf_load size %d", btf_raw_size);
emit_check_err(gen);
/* remember btf_fd in the stack, if successful */
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
}
void bpf_gen__map_create(struct bpf_gen *gen,
struct bpf_create_map_attr *map_attr, int map_idx)
{
int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id);
bool close_inner_map_fd = false;
int map_create_attr;
union bpf_attr attr;
memset(&attr, 0, attr_size);
attr.map_type = map_attr->map_type;
attr.key_size = map_attr->key_size;
attr.value_size = map_attr->value_size;
attr.map_flags = map_attr->map_flags;
memcpy(attr.map_name, map_attr->name,
min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1));
attr.numa_node = map_attr->numa_node;
attr.map_ifindex = map_attr->map_ifindex;
attr.max_entries = map_attr->max_entries;
switch (attr.map_type) {
case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
case BPF_MAP_TYPE_CGROUP_ARRAY:
case BPF_MAP_TYPE_STACK_TRACE:
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
case BPF_MAP_TYPE_DEVMAP:
case BPF_MAP_TYPE_DEVMAP_HASH:
case BPF_MAP_TYPE_CPUMAP:
case BPF_MAP_TYPE_XSKMAP:
case BPF_MAP_TYPE_SOCKMAP:
case BPF_MAP_TYPE_SOCKHASH:
case BPF_MAP_TYPE_QUEUE:
case BPF_MAP_TYPE_STACK:
case BPF_MAP_TYPE_RINGBUF:
break;
default:
attr.btf_key_type_id = map_attr->btf_key_type_id;
attr.btf_value_type_id = map_attr->btf_value_type_id;
}
pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
attr.map_name, map_idx, map_attr->map_type, attr.btf_value_type_id);
map_create_attr = add_data(gen, &attr, attr_size);
if (attr.btf_value_type_id)
/* populate union bpf_attr with btf_fd saved in the stack earlier */
move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
stack_off(btf_fd));
switch (attr.map_type) {
case BPF_MAP_TYPE_ARRAY_OF_MAPS:
case BPF_MAP_TYPE_HASH_OF_MAPS:
move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
stack_off(inner_map_fd));
close_inner_map_fd = true;
break;
default:
break;
}
/* conditionally update max_entries */
if (map_idx >= 0)
move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
sizeof(struct bpf_loader_ctx) +
sizeof(struct bpf_map_desc) * map_idx +
offsetof(struct bpf_map_desc, max_entries),
true /* check that max_entries != 0 */);
/* emit MAP_CREATE command */
emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
attr.map_name, map_idx, map_attr->map_type, attr.value_size,
attr.btf_value_type_id);
emit_check_err(gen);
/* remember map_fd in the stack, if successful */
if (map_idx < 0) {
/* This bpf_gen__map_create() function is called with map_idx >= 0
* for all maps that libbpf loading logic tracks.
* It's called with -1 to create an inner map.
*/
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
stack_off(inner_map_fd)));
} else if (map_idx != gen->nr_maps) {
gen->error = -EDOM; /* internal bug */
return;
} else {
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
stack_off(map_fd[map_idx])));
gen->nr_maps++;
}
if (close_inner_map_fd)
emit_sys_close_stack(gen, stack_off(inner_map_fd));
}
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
enum bpf_attach_type type)
{
const char *prefix;
int kind, ret;
btf_get_kernel_prefix_kind(type, &prefix, &kind);
gen->attach_kind = kind;
ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
prefix, attach_name);
if (ret == sizeof(gen->attach_target))
gen->error = -ENOSPC;
}
static void emit_find_attach_target(struct bpf_gen *gen)
{
int name, len = strlen(gen->attach_target) + 1;
pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
name = add_data(gen, gen->attach_target, len);
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, name));
emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
debug_ret(gen, "find_by_name_kind(%s,%d)",
gen->attach_target, gen->attach_kind);
emit_check_err(gen);
/* if successful, btf_id is in lower 32-bit of R7 and
* btf_obj_fd is in upper 32-bit
*/
}
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
int insn_idx)
{
struct ksym_relo_desc *relo;
relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
if (!relo) {
gen->error = -ENOMEM;
return;
}
gen->relos = relo;
relo += gen->relo_cnt;
relo->name = name;
relo->kind = kind;
relo->insn_idx = insn_idx;
gen->relo_cnt++;
}
static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
int name, insn, len = strlen(relo->name) + 1;
pr_debug("gen: emit_relo: %s at %d\n", relo->name, relo->insn_idx);
name = add_data(gen, relo->name, len);
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, name));
emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
emit_check_err(gen);
/* store btf_id into insn[insn_idx].imm */
insn = insns + sizeof(struct bpf_insn) * relo->insn_idx +
offsetof(struct bpf_insn, imm);
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, insn));
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, 0));
if (relo->kind == BTF_KIND_VAR) {
/* store btf_obj_fd into insn[insn_idx + 1].imm */
emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
sizeof(struct bpf_insn)));
}
}
static void emit_relos(struct bpf_gen *gen, int insns)
{
int i;
for (i = 0; i < gen->relo_cnt; i++)
emit_relo(gen, gen->relos + i, insns);
}
static void cleanup_relos(struct bpf_gen *gen, int insns)
{
int i, insn;
for (i = 0; i < gen->relo_cnt; i++) {
if (gen->relos[i].kind != BTF_KIND_VAR)
continue;
/* close fd recorded in insn[insn_idx + 1].imm */
insn = insns +
sizeof(struct bpf_insn) * (gen->relos[i].insn_idx + 1) +
offsetof(struct bpf_insn, imm);
emit_sys_close_blob(gen, insn);
}
if (gen->relo_cnt) {
free(gen->relos);
gen->relo_cnt = 0;
gen->relos = NULL;
}
}
void bpf_gen__prog_load(struct bpf_gen *gen,
struct bpf_prog_load_params *load_attr, int prog_idx)
{
int attr_size = offsetofend(union bpf_attr, fd_array);
int prog_load_attr, license, insns, func_info, line_info;
union bpf_attr attr;
memset(&attr, 0, attr_size);
pr_debug("gen: prog_load: type %d insns_cnt %zd\n",
load_attr->prog_type, load_attr->insn_cnt);
/* add license string to blob of bytes */
license = add_data(gen, load_attr->license, strlen(load_attr->license) + 1);
/* add insns to blob of bytes */
insns = add_data(gen, load_attr->insns,
load_attr->insn_cnt * sizeof(struct bpf_insn));
attr.prog_type = load_attr->prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
attr.attach_btf_id = load_attr->attach_btf_id;
attr.prog_ifindex = load_attr->prog_ifindex;
attr.kern_version = 0;
attr.insn_cnt = (__u32)load_attr->insn_cnt;
attr.prog_flags = load_attr->prog_flags;
attr.func_info_rec_size = load_attr->func_info_rec_size;
attr.func_info_cnt = load_attr->func_info_cnt;
func_info = add_data(gen, load_attr->func_info,
attr.func_info_cnt * attr.func_info_rec_size);
attr.line_info_rec_size = load_attr->line_info_rec_size;
attr.line_info_cnt = load_attr->line_info_cnt;
line_info = add_data(gen, load_attr->line_info,
attr.line_info_cnt * attr.line_info_rec_size);
memcpy(attr.prog_name, load_attr->name,
min((unsigned)strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
prog_load_attr = add_data(gen, &attr, attr_size);
/* populate union bpf_attr with a pointer to license */
emit_rel_store(gen, attr_field(prog_load_attr, license), license);
/* populate union bpf_attr with a pointer to instructions */
emit_rel_store(gen, attr_field(prog_load_attr, insns), insns);
/* populate union bpf_attr with a pointer to func_info */
emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);
/* populate union bpf_attr with a pointer to line_info */
emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);
/* populate union bpf_attr fd_array with a pointer to stack where map_fds are saved */
emit_rel_store_sp(gen, attr_field(prog_load_attr, fd_array),
stack_off(map_fd[0]));
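/* the kernel resolves BPF_PSEUDO_MAP_IDX[_VALUE] ldimm64 insns by indexing
 * into this fd_array, i.e. into the map FDs saved on the loader stack
 */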
/* populate union bpf_attr with user provided log details */
move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
offsetof(struct bpf_loader_ctx, log_level), false);
move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
offsetof(struct bpf_loader_ctx, log_size), false);
move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
offsetof(struct bpf_loader_ctx, log_buf), false);
/* populate union bpf_attr with btf_fd saved in the stack earlier */
move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
stack_off(btf_fd));
if (gen->attach_kind) {
emit_find_attach_target(gen);
/* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, prog_load_attr));
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
offsetof(union bpf_attr, attach_btf_id)));
emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
offsetof(union bpf_attr, attach_btf_obj_fd)));
}
emit_relos(gen, insns);
/* emit PROG_LOAD command */
emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
cleanup_relos(gen, insns);
if (gen->attach_kind)
emit_sys_close_blob(gen,
attr_field(prog_load_attr, attach_btf_obj_fd));
emit_check_err(gen);
/* remember prog_fd in the stack, if successful */
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
stack_off(prog_fd[gen->nr_progs])));
gen->nr_progs++;
}
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
__u32 value_size)
{
int attr_size = offsetofend(union bpf_attr, flags);
int map_update_attr, value, key;
union bpf_attr attr;
int zero = 0;
memset(&attr, 0, attr_size);
pr_debug("gen: map_update_elem: idx %d\n", map_idx);
value = add_data(gen, pvalue, value_size);
key = add_data(gen, &zero, sizeof(zero));
/* if (map_desc[map_idx].initial_value)
* copy_from_user(value, initial_value, value_size);
*/
emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
sizeof(struct bpf_loader_ctx) +
sizeof(struct bpf_map_desc) * map_idx +
offsetof(struct bpf_map_desc, initial_value)));
emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 4));
emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
0, 0, 0, value));
emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
map_update_attr = add_data(gen, &attr, attr_size);
move_stack2blob(gen, attr_field(map_update_attr, map_fd), 4,
stack_off(map_fd[map_idx]));
emit_rel_store(gen, attr_field(map_update_attr, key), key);
emit_rel_store(gen, attr_field(map_update_attr, value), value);
/* emit MAP_UPDATE_ELEM command */
emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
emit_check_err(gen);
}
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
{
int attr_size = offsetofend(union bpf_attr, map_fd);
int map_freeze_attr;
union bpf_attr attr;
memset(&attr, 0, attr_size);
pr_debug("gen: map_freeze: idx %d\n", map_idx);
map_freeze_attr = add_data(gen, &attr, attr_size);
move_stack2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
stack_off(map_fd[map_idx]));
/* emit MAP_FREEZE command */
emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
debug_ret(gen, "map_freeze");
emit_check_err(gen);
}

View file

@ -54,6 +54,7 @@
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
@ -178,7 +179,7 @@ enum kern_feature_id {
__FEAT_CNT,
};
static bool kernel_supports(enum kern_feature_id feat_id);
static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id);
enum reloc_type {
RELO_LD64,
@ -432,6 +433,8 @@ struct bpf_object {
bool loaded;
bool has_subcalls;
struct bpf_gen *gen_loader;
/*
* Information when doing elf related work. Only valid if fd
* is valid.
@ -677,6 +680,11 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
return -LIBBPF_ERRNO__FORMAT;
}
if (sec_idx != obj->efile.text_shndx && GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
return -ENOTSUP;
}
pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
@ -700,13 +708,14 @@ bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
if (err)
return err;
/* if function is a global/weak symbol, but has hidden
* visibility (STV_HIDDEN), mark its BTF FUNC as static to
* enable more permissive BPF verification mode with more
* outside context available to BPF verifier
/* if function is a global/weak symbol, but has restricted
* (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
* as static to enable more permissive BPF verification mode
* with more outside context available to BPF verifier
*/
if (GELF_ST_BIND(sym.st_info) != STB_LOCAL
&& GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN)
&& (GELF_ST_VISIBILITY(sym.st_other) == STV_HIDDEN
|| GELF_ST_VISIBILITY(sym.st_other) == STV_INTERNAL))
prog->mark_btf_static = true;
nr_progs++;
@ -1794,7 +1803,6 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
if (!symbols)
return -EINVAL;
scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
data = elf_sec_data(obj, scn);
if (!scn || !data) {
@ -1854,6 +1862,12 @@ static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
return -LIBBPF_ERRNO__FORMAT;
}
if (GELF_ST_TYPE(sym.st_info) == STT_SECTION
|| GELF_ST_BIND(sym.st_info) == STB_LOCAL) {
pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
return -ENOTSUP;
}
map->libbpf_type = LIBBPF_MAP_UNSPEC;
map->sec_idx = sym.st_shndx;
map->sec_offset = sym.st_value;
@ -2261,6 +2275,16 @@ static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def
pr_debug("map '%s': found inner map definition.\n", map->name);
}
static const char *btf_var_linkage_str(__u32 linkage)
{
switch (linkage) {
case BTF_VAR_STATIC: return "static";
case BTF_VAR_GLOBAL_ALLOCATED: return "global";
case BTF_VAR_GLOBAL_EXTERN: return "extern";
default: return "unknown";
}
}
static int bpf_object__init_user_btf_map(struct bpf_object *obj,
const struct btf_type *sec,
int var_idx, int sec_idx,
@ -2293,10 +2317,9 @@ static int bpf_object__init_user_btf_map(struct bpf_object *obj,
map_name, btf_kind_str(var));
return -EINVAL;
}
if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
var_extra->linkage != BTF_VAR_STATIC) {
pr_warn("map '%s': unsupported var linkage %u.\n",
map_name, var_extra->linkage);
if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
pr_warn("map '%s': unsupported map linkage %s.\n",
map_name, btf_var_linkage_str(var_extra->linkage));
return -EOPNOTSUPP;
}
@ -2443,20 +2466,20 @@ static bool section_have_execinstr(struct bpf_object *obj, int idx)
static bool btf_needs_sanitization(struct bpf_object *obj)
{
bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
bool has_float = kernel_supports(FEAT_BTF_FLOAT);
bool has_func = kernel_supports(FEAT_BTF_FUNC);
bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
return !has_func || !has_datasec || !has_func_global || !has_float;
}
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
bool has_float = kernel_supports(FEAT_BTF_FLOAT);
bool has_func = kernel_supports(FEAT_BTF_FUNC);
bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
struct btf_type *t;
int i, j, vlen;
@ -2637,7 +2660,7 @@ static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
int err;
/* btf_vmlinux could be loaded earlier */
if (obj->btf_vmlinux)
if (obj->btf_vmlinux || obj->gen_loader)
return 0;
if (!force && !obj_needs_vmlinux_btf(obj))
@ -2662,7 +2685,7 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
if (!obj->btf)
return 0;
if (!kernel_supports(FEAT_BTF)) {
if (!kernel_supports(obj, FEAT_BTF)) {
if (kernel_needs_btf(obj)) {
err = -EOPNOTSUPP;
goto report;
@ -2719,7 +2742,20 @@ static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
bpf_object__sanitize_btf(obj, kern_btf);
}
err = btf__load(kern_btf);
if (obj->gen_loader) {
__u32 raw_size = 0;
const void *raw_data = btf__get_raw_data(kern_btf, &raw_size);
if (!raw_data)
return -ENOMEM;
bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
/* Pretend to have valid FD to pass various fd >= 0 checks.
* This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
*/
btf__set_fd(kern_btf, 0);
} else {
err = btf__load(kern_btf);
}
if (sanitize) {
if (!err) {
/* move fd to libbpf's BTF */
@ -4290,11 +4326,17 @@ static struct kern_feature_desc {
},
};
static bool kernel_supports(enum kern_feature_id feat_id)
static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
{
struct kern_feature_desc *feat = &feature_probes[feat_id];
int ret;
if (obj->gen_loader)
/* When generating a loader program, assume the latest kernel
 * to avoid issuing the extra prog_load/map_create probe syscalls.
 */
return true;
if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
ret = feat->probe();
if (ret > 0) {
@ -4377,6 +4419,13 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
char *cp, errmsg[STRERR_BUFSIZE];
int err, zero = 0;
if (obj->gen_loader) {
bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
map->mmaped, map->def.value_size);
if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
return 0;
}
err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
if (err) {
err = -errno;
@ -4402,14 +4451,14 @@ bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
static void bpf_map__destroy(struct bpf_map *map);
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
{
struct bpf_create_map_attr create_attr;
struct bpf_map_def *def = &map->def;
memset(&create_attr, 0, sizeof(create_attr));
if (kernel_supports(FEAT_PROG_NAME))
if (kernel_supports(obj, FEAT_PROG_NAME))
create_attr.name = map->name;
create_attr.map_ifindex = map->map_ifindex;
create_attr.map_type = def->type;
@ -4450,7 +4499,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
if (map->inner_map) {
int err;
err = bpf_object__create_map(obj, map->inner_map);
err = bpf_object__create_map(obj, map->inner_map, true);
if (err) {
pr_warn("map '%s': failed to create inner map: %d\n",
map->name, err);
@ -4462,7 +4511,15 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
create_attr.inner_map_fd = map->inner_map_fd;
}
map->fd = bpf_create_map_xattr(&create_attr);
if (obj->gen_loader) {
bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps);
/* Pretend to have valid FD to pass various fd >= 0 checks.
* This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
*/
map->fd = 0;
} else {
map->fd = bpf_create_map_xattr(&create_attr);
}
if (map->fd < 0 && (create_attr.btf_key_type_id ||
create_attr.btf_value_type_id)) {
char *cp, errmsg[STRERR_BUFSIZE];
@ -4483,6 +4540,8 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
return -errno;
if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
if (obj->gen_loader)
map->inner_map->fd = -1;
bpf_map__destroy(map->inner_map);
zfree(&map->inner_map);
}
@ -4490,11 +4549,11 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
return 0;
}
static int init_map_slots(struct bpf_map *map)
static int init_map_slots(struct bpf_object *obj, struct bpf_map *map)
{
const struct bpf_map *targ_map;
unsigned int i;
int fd, err;
int fd, err = 0;
for (i = 0; i < map->init_slots_sz; i++) {
if (!map->init_slots[i])
@ -4502,7 +4561,13 @@ static int init_map_slots(struct bpf_map *map)
targ_map = map->init_slots[i];
fd = bpf_map__fd(targ_map);
err = bpf_map_update_elem(map->fd, &i, &fd, 0);
if (obj->gen_loader) {
pr_warn("// TODO map_update_elem: idx %ld key %d value==map_idx %ld\n",
map - obj->maps, i, targ_map - obj->maps);
return -ENOTSUP;
} else {
err = bpf_map_update_elem(map->fd, &i, &fd, 0);
}
if (err) {
err = -errno;
pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
@ -4544,7 +4609,7 @@ bpf_object__create_maps(struct bpf_object *obj)
pr_debug("map '%s': skipping creation (preset fd=%d)\n",
map->name, map->fd);
} else {
err = bpf_object__create_map(obj, map);
err = bpf_object__create_map(obj, map, false);
if (err)
goto err_out;
@ -4560,7 +4625,7 @@ bpf_object__create_maps(struct bpf_object *obj)
}
if (map->init_slots_sz) {
err = init_map_slots(map);
err = init_map_slots(obj, map);
if (err < 0) {
zclose(map->fd);
goto err_out;
@ -4970,11 +5035,14 @@ static int load_module_btfs(struct bpf_object *obj)
if (obj->btf_modules_loaded)
return 0;
if (obj->gen_loader)
return 0;
/* don't do this again, even if we find no module BTFs */
obj->btf_modules_loaded = true;
/* kernel too old to support module BTFs */
if (!kernel_supports(FEAT_MODULE_BTF))
if (!kernel_supports(obj, FEAT_MODULE_BTF))
return 0;
while (true) {
@ -6117,6 +6185,12 @@ static int bpf_core_apply_relo(struct bpf_program *prog,
if (str_is_empty(spec_str))
return -EINVAL;
if (prog->obj->gen_loader) {
pr_warn("// TODO core_relo: prog %ld insn[%d] %s %s kind %d\n",
prog - prog->obj->programs, relo->insn_off / 8,
local_name, spec_str, relo->kind);
return -ENOTSUP;
}
err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
if (err) {
pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
@ -6368,19 +6442,34 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
switch (relo->type) {
case RELO_LD64:
insn[0].src_reg = BPF_PSEUDO_MAP_FD;
insn[0].imm = obj->maps[relo->map_idx].fd;
if (obj->gen_loader) {
insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
insn[0].imm = relo->map_idx;
} else {
insn[0].src_reg = BPF_PSEUDO_MAP_FD;
insn[0].imm = obj->maps[relo->map_idx].fd;
}
break;
case RELO_DATA:
insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
insn[1].imm = insn[0].imm + relo->sym_off;
insn[0].imm = obj->maps[relo->map_idx].fd;
if (obj->gen_loader) {
insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
insn[0].imm = relo->map_idx;
} else {
insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
insn[0].imm = obj->maps[relo->map_idx].fd;
}
break;
case RELO_EXTERN_VAR:
ext = &obj->externs[relo->sym_off];
if (ext->type == EXT_KCFG) {
insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
if (obj->gen_loader) {
insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
insn[0].imm = obj->kconfig_map_idx;
} else {
insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
}
insn[1].imm = ext->kcfg.data_off;
} else /* EXT_KSYM */ {
if (ext->ksym.type_id) { /* typed ksyms */
@ -6399,11 +6488,15 @@ bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
insn[0].imm = ext->ksym.kernel_btf_id;
break;
case RELO_SUBPROG_ADDR:
insn[0].src_reg = BPF_PSEUDO_FUNC;
/* will be handled as a follow up pass */
if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
pr_warn("prog '%s': relo #%d: bad insn\n",
prog->name, i);
return -EINVAL;
}
/* handled already */
break;
case RELO_CALL:
/* will be handled as a follow up pass */
/* handled already */
break;
default:
pr_warn("prog '%s': relo #%d: bad relo type %d\n",
@ -6494,7 +6587,7 @@ reloc_prog_func_and_line_info(const struct bpf_object *obj,
/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
* support func/line info
*/
if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
return 0;
/* only attempt func info relocation if main program's func_info
@ -6572,6 +6665,30 @@ static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, si
sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
}
static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
{
int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
struct reloc_desc *relos;
int i;
if (main_prog == subprog)
return 0;
relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
if (!relos)
return -ENOMEM;
memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
sizeof(*relos) * subprog->nr_reloc);
for (i = main_prog->nr_reloc; i < new_cnt; i++)
relos[i].insn_idx += subprog->sub_insn_off;
/* After insn_idx adjustment the 'relos' array is still sorted
* by insn_idx and doesn't break bsearch.
*/
main_prog->reloc_desc = relos;
main_prog->nr_reloc = new_cnt;
return 0;
}
static int
bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
struct bpf_program *prog)
@ -6592,6 +6709,11 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
continue;
relo = find_prog_insn_relo(prog, insn_idx);
if (relo && relo->type == RELO_EXTERN_FUNC)
/* kfunc relocations will be handled later
* in bpf_object__relocate_data()
*/
continue;
if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
prog->name, insn_idx, relo->type);
@ -6666,6 +6788,10 @@ bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
main_prog->name, subprog->insns_cnt, subprog->name);
/* The subprog insns are now appended. Append its relos too. */
err = append_subprog_relos(main_prog, subprog);
if (err)
return err;
err = bpf_object__reloc_code(obj, main_prog, subprog);
if (err)
return err;
@ -6795,11 +6921,25 @@ bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
return 0;
}
static void
bpf_object__free_relocs(struct bpf_object *obj)
{
struct bpf_program *prog;
int i;
/* free up relocation descriptors */
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
zfree(&prog->reloc_desc);
prog->nr_reloc = 0;
}
}
static int
bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
{
struct bpf_program *prog;
size_t i;
size_t i, j;
int err;
if (obj->btf_ext) {
@ -6810,23 +6950,32 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
}
/* relocate data references first for all programs and sub-programs,
* as they don't change relative to code locations, so subsequent
* subprogram processing won't need to re-calculate any of them
/* Before relocating calls, pre-process relocations and mark
 * the few ld_imm64 instructions that point to subprogs.
 * Otherwise bpf_object__reloc_code() later would have to consider
 * all ld_imm64 insns as relocation candidates. That would
 * slow relocation down, since the number of find_prog_insn_relo()
 * calls would increase and most of them would fail to find a relo.
 */
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
err = bpf_object__relocate_data(obj, prog);
if (err) {
pr_warn("prog '%s': failed to relocate data references: %d\n",
prog->name, err);
return err;
for (j = 0; j < prog->nr_reloc; j++) {
struct reloc_desc *relo = &prog->reloc_desc[j];
struct bpf_insn *insn = &prog->insns[relo->insn_idx];
/* mark the insn, so it's recognized by insn_is_pseudo_func() */
if (relo->type == RELO_SUBPROG_ADDR)
insn[0].src_reg = BPF_PSEUDO_FUNC;
}
}
/* now relocate subprogram calls and append used subprograms to main
/* relocate subprogram calls and append used subprograms to main
* programs; each copy of subprogram code needs to be relocated
* differently for each main program, because its code location might
* have changed
* have changed.
* Append subprog relos to main programs to allow data relos to be
* processed after text is completely relocated.
*/
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
@ -6843,12 +6992,20 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
}
/* free up relocation descriptors */
/* Process data relos for main programs */
for (i = 0; i < obj->nr_programs; i++) {
prog = &obj->programs[i];
zfree(&prog->reloc_desc);
prog->nr_reloc = 0;
if (prog_is_subprog(obj, prog))
continue;
err = bpf_object__relocate_data(obj, prog);
if (err) {
pr_warn("prog '%s': failed to relocate data references: %d\n",
prog->name, err);
return err;
}
}
if (!obj->gen_loader)
bpf_object__free_relocs(obj);
return 0;
}
@ -7037,6 +7194,9 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
enum bpf_func_id func_id;
int i;
if (obj->gen_loader)
return 0;
for (i = 0; i < prog->insns_cnt; i++, insn++) {
if (!insn_is_helper_call(insn, &func_id))
continue;
@ -7048,12 +7208,12 @@ static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program
switch (func_id) {
case BPF_FUNC_probe_read_kernel:
case BPF_FUNC_probe_read_user:
if (!kernel_supports(FEAT_PROBE_READ_KERN))
if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
insn->imm = BPF_FUNC_probe_read;
break;
case BPF_FUNC_probe_read_kernel_str:
case BPF_FUNC_probe_read_user_str:
if (!kernel_supports(FEAT_PROBE_READ_KERN))
if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
insn->imm = BPF_FUNC_probe_read_str;
break;
default:
@ -7088,12 +7248,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.prog_type = prog->type;
/* old kernels might not support specifying expected_attach_type */
if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
if (!kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
prog->sec_def->is_exp_attach_type_optional)
load_attr.expected_attach_type = 0;
else
load_attr.expected_attach_type = prog->expected_attach_type;
if (kernel_supports(FEAT_PROG_NAME))
if (kernel_supports(prog->obj, FEAT_PROG_NAME))
load_attr.name = prog->name;
load_attr.insns = insns;
load_attr.insn_cnt = insns_cnt;
@ -7109,7 +7269,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
/* specify func_info/line_info only if kernel supports them */
btf_fd = bpf_object__btf_fd(prog->obj);
if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
if (btf_fd >= 0 && kernel_supports(prog->obj, FEAT_BTF_FUNC)) {
load_attr.prog_btf_fd = btf_fd;
load_attr.func_info = prog->func_info;
load_attr.func_info_rec_size = prog->func_info_rec_size;
@ -7121,6 +7281,12 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
load_attr.log_level = prog->log_level;
load_attr.prog_flags = prog->prog_flags;
if (prog->obj->gen_loader) {
bpf_gen__prog_load(prog->obj->gen_loader, &load_attr,
prog - prog->obj->programs);
*pfd = -1;
return 0;
}
retry_load:
if (log_buf_size) {
log_buf = malloc(log_buf_size);
@ -7139,7 +7305,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
pr_debug("verifier log:\n%s", log_buf);
if (prog->obj->rodata_map_idx >= 0 &&
kernel_supports(FEAT_PROG_BIND_MAP)) {
kernel_supports(prog->obj, FEAT_PROG_BIND_MAP)) {
struct bpf_map *rodata_map =
&prog->obj->maps[prog->obj->rodata_map_idx];
@ -7198,6 +7364,38 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
return ret;
}
static int bpf_program__record_externs(struct bpf_program *prog)
{
struct bpf_object *obj = prog->obj;
int i;
for (i = 0; i < prog->nr_reloc; i++) {
struct reloc_desc *relo = &prog->reloc_desc[i];
struct extern_desc *ext = &obj->externs[relo->sym_off];
switch (relo->type) {
case RELO_EXTERN_VAR:
if (ext->type != EXT_KSYM)
continue;
if (!ext->ksym.type_id) {
pr_warn("typeless ksym %s is not supported yet\n",
ext->name);
return -ENOTSUP;
}
bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_VAR,
relo->insn_idx);
break;
case RELO_EXTERN_FUNC:
bpf_gen__record_extern(obj->gen_loader, ext->name, BTF_KIND_FUNC,
relo->insn_idx);
break;
default:
continue;
}
}
return 0;
}
static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
@ -7243,6 +7441,8 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
prog->name, prog->instances.nr);
}
if (prog->obj->gen_loader)
bpf_program__record_externs(prog);
err = load_program(prog, prog->insns, prog->insns_cnt,
license, kern_ver, &fd);
if (!err)
@ -7319,6 +7519,8 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
if (err)
return err;
}
if (obj->gen_loader)
bpf_object__free_relocs(obj);
return 0;
}
@ -7497,11 +7699,11 @@ static int bpf_object__sanitize_maps(struct bpf_object *obj)
bpf_object__for_each_map(m, obj) {
if (!bpf_map__is_internal(m))
continue;
if (!kernel_supports(FEAT_GLOBAL_DATA)) {
if (!kernel_supports(obj, FEAT_GLOBAL_DATA)) {
pr_warn("kernel doesn't support global data\n");
return -ENOTSUP;
}
if (!kernel_supports(FEAT_ARRAY_MMAP))
if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
m->def.map_flags ^= BPF_F_MMAPABLE;
}
@ -7699,6 +7901,12 @@ static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
if (ext->type != EXT_KSYM || !ext->ksym.type_id)
continue;
if (obj->gen_loader) {
ext->is_set = true;
ext->ksym.kernel_btf_obj_fd = 0;
ext->ksym.kernel_btf_id = 0;
continue;
}
t = btf__type_by_id(obj->btf, ext->btf_id);
if (btf_is_var(t))
err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
@ -7813,6 +8021,9 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
return -EINVAL;
}
if (obj->gen_loader)
bpf_gen__init(obj->gen_loader, attr->log_level);
err = bpf_object__probe_loading(obj);
err = err ? : bpf_object__load_vmlinux_btf(obj, false);
err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
@ -7823,6 +8034,15 @@ int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
err = err ? : bpf_object__load_progs(obj, attr->log_level);
if (obj->gen_loader) {
/* reset FDs */
btf__set_fd(obj->btf, -1);
for (i = 0; i < obj->nr_maps; i++)
obj->maps[i].fd = -1;
if (!err)
err = bpf_gen__finish(obj->gen_loader);
}
/* clean up module BTFs */
for (i = 0; i < obj->btf_module_cnt; i++) {
close(obj->btf_modules[i].fd);
@ -8448,6 +8668,7 @@ void bpf_object__close(struct bpf_object *obj)
if (obj->clear_priv)
obj->clear_priv(obj, obj->priv);
bpf_gen__free(obj->gen_loader);
bpf_object__elf_finish(obj);
bpf_object__unload(obj);
btf__free(obj->btf);
@ -8538,6 +8759,22 @@ void *bpf_object__priv(const struct bpf_object *obj)
return obj ? obj->priv : ERR_PTR(-EINVAL);
}
int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
{
struct bpf_gen *gen;
if (!opts)
return -EFAULT;
if (!OPTS_VALID(opts, gen_loader_opts))
return -EINVAL;
gen = calloc(sizeof(*gen), 1);
if (!gen)
return -ENOMEM;
gen->opts = opts;
obj->gen_loader = gen;
return 0;
}
static struct bpf_program *
__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
bool forward)
@ -8884,6 +9121,8 @@ static const struct bpf_sec_def section_defs[] = {
.expected_attach_type = BPF_TRACE_ITER,
.is_attach_btf = true,
.attach_fn = attach_iter),
SEC_DEF("syscall", SYSCALL,
.is_sleepable = true),
BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
BPF_XDP_DEVMAP),
BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP,
@ -9173,6 +9412,28 @@ static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
#define BTF_ITER_PREFIX "bpf_iter_"
#define BTF_MAX_NAME_SIZE 128
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
const char **prefix, int *kind)
{
switch (attach_type) {
case BPF_TRACE_RAW_TP:
*prefix = BTF_TRACE_PREFIX;
*kind = BTF_KIND_TYPEDEF;
break;
case BPF_LSM_MAC:
*prefix = BTF_LSM_PREFIX;
*kind = BTF_KIND_FUNC;
break;
case BPF_TRACE_ITER:
*prefix = BTF_ITER_PREFIX;
*kind = BTF_KIND_FUNC;
break;
default:
*prefix = "";
*kind = BTF_KIND_FUNC;
}
}
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
const char *name, __u32 kind)
{
@ -9193,21 +9454,11 @@ static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
static inline int find_attach_btf_id(struct btf *btf, const char *name,
enum bpf_attach_type attach_type)
{
int err;
const char *prefix;
int kind;
if (attach_type == BPF_TRACE_RAW_TP)
err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
BTF_KIND_TYPEDEF);
else if (attach_type == BPF_LSM_MAC)
err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
BTF_KIND_FUNC);
else if (attach_type == BPF_TRACE_ITER)
err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
BTF_KIND_FUNC);
else
err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
return err;
btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
return find_btf_by_prefix_kind(btf, prefix, name, kind);
}
int libbpf_find_vmlinux_btf_id(const char *name,
@ -9306,7 +9557,7 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd,
__u32 attach_prog_fd = prog->attach_prog_fd;
const char *name = prog->sec_name, *attach_name;
const struct bpf_sec_def *sec = NULL;
int i, err;
int i, err = 0;
if (!name)
return -EINVAL;
@ -9341,7 +9592,13 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd,
}
/* kernel/module BTF ID */
err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
if (prog->obj->gen_loader) {
bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
*btf_obj_fd = 0;
*btf_type_id = 1;
} else {
err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
}
if (err) {
pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
return err;
@ -9498,6 +9755,14 @@ int bpf_map__set_initial_value(struct bpf_map *map,
return 0;
}
const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
{
if (!map->mmaped)
return NULL;
*psize = map->def.value_size;
return map->mmaped;
}
bool bpf_map__is_offload_neutral(const struct bpf_map *map)
{
return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;

View file

@ -471,6 +471,7 @@ LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
LIBBPF_API void *bpf_map__priv(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_initial_value(struct bpf_map *map,
const void *data, size_t size);
LIBBPF_API const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize);
LIBBPF_API bool bpf_map__is_offload_neutral(const struct bpf_map *map);
LIBBPF_API bool bpf_map__is_internal(const struct bpf_map *map);
LIBBPF_API int bpf_map__set_pin_path(struct bpf_map *map, const char *path);
@ -498,6 +499,7 @@ LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd);
/* XDP related API */
struct xdp_link_info {
__u32 prog_id;
__u32 drv_prog_id;
@ -520,6 +522,49 @@ LIBBPF_API int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags);
LIBBPF_API int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
size_t info_size, __u32 flags);
/* TC related API */
enum bpf_tc_attach_point {
BPF_TC_INGRESS = 1 << 0,
BPF_TC_EGRESS = 1 << 1,
BPF_TC_CUSTOM = 1 << 2,
};
#define BPF_TC_PARENT(a, b) \
((((a) << 16) & 0xFFFF0000U) | ((b) & 0x0000FFFFU))
enum bpf_tc_flags {
BPF_TC_F_REPLACE = 1 << 0,
};
struct bpf_tc_hook {
size_t sz;
int ifindex;
enum bpf_tc_attach_point attach_point;
__u32 parent;
size_t :0;
};
#define bpf_tc_hook__last_field parent
struct bpf_tc_opts {
size_t sz;
int prog_fd;
__u32 flags;
__u32 prog_id;
__u32 handle;
__u32 priority;
size_t :0;
};
#define bpf_tc_opts__last_field priority
LIBBPF_API int bpf_tc_hook_create(struct bpf_tc_hook *hook);
LIBBPF_API int bpf_tc_hook_destroy(struct bpf_tc_hook *hook);
LIBBPF_API int bpf_tc_attach(const struct bpf_tc_hook *hook,
struct bpf_tc_opts *opts);
LIBBPF_API int bpf_tc_detach(const struct bpf_tc_hook *hook,
const struct bpf_tc_opts *opts);
LIBBPF_API int bpf_tc_query(const struct bpf_tc_hook *hook,
struct bpf_tc_opts *opts);
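
The selftest added later in this series (prog_tests/tc_bpf.c) exercises these entry points. As a rough, hand-written usage sketch (not part of the diff), attaching an already-loaded SCHED_CLS program to loopback ingress looks like this; prog_fd is assumed to come from bpf_program__fd() on a loaded SEC("classifier") program:

#include <errno.h>
#include <bpf/libbpf.h>

/* Hand-written sketch of the new tc-bpf API, not an excerpt from the diff. */
static int attach_cls_to_lo_ingress(int prog_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = 1 /* lo */,
			    .attach_point = BPF_TC_INGRESS);
	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1,
			    .prog_fd = prog_fd);
	int err;

	err = bpf_tc_hook_create(&hook);	/* sets up the clsact qdisc */
	if (err && err != -EEXIST)
		return err;
	return bpf_tc_attach(&hook, &opts);	/* fills opts.prog_id on success */
}

Detaching later requires zeroing prog_fd/prog_id/flags in the same opts and calling bpf_tc_detach(), as the selftest below does.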
/* Ring buffer APIs */
struct ring_buffer;
@ -756,6 +801,18 @@ LIBBPF_API int bpf_object__attach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__detach_skeleton(struct bpf_object_skeleton *s);
LIBBPF_API void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s);
struct gen_loader_opts {
size_t sz; /* size of this struct, for forward/backward compatibility */
const char *data;
const char *insns;
__u32 data_sz;
__u32 insns_sz;
};
#define gen_loader_opts__last_field insns_sz
LIBBPF_API int bpf_object__gen_loader(struct bpf_object *obj,
struct gen_loader_opts *opts);
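
bpf_object__gen_loader() is the hook used for light skeleton generation: once it is called on an opened object, a subsequent bpf_object__load() records the would-be syscalls into a loader program instead of executing them. A minimal caller-side sketch, assuming the object was opened elsewhere (this mirrors how bpftool is expected to drive it; it is not an excerpt):

#include <bpf/libbpf.h>

/* Hand-written sketch: produce loader-program bytes for a light skeleton
 * instead of loading the object into the kernel.
 */
static int generate_loader(struct bpf_object *obj)
{
	DECLARE_LIBBPF_OPTS(gen_loader_opts, gen);
	int err;

	err = bpf_object__gen_loader(obj, &gen);
	if (err)
		return err;
	err = bpf_object__load(obj);	/* records syscalls rather than running them */
	if (err)
		return err;
	/* gen.insns/gen.insns_sz now hold the loader program, and
	 * gen.data/gen.data_sz its data blob, ready to be embedded
	 * into a *.lskel.h file.
	 */
	return 0;
}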
enum libbpf_tristate {
TRI_NO = 0,
TRI_YES = 1,
@ -768,10 +825,18 @@ struct bpf_linker_opts {
};
#define bpf_linker_opts__last_field sz
struct bpf_linker_file_opts {
/* size of this struct, for forward/backward compatibility */
size_t sz;
};
#define bpf_linker_file_opts__last_field sz
struct bpf_linker;
LIBBPF_API struct bpf_linker *bpf_linker__new(const char *filename, struct bpf_linker_opts *opts);
LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker, const char *filename);
LIBBPF_API int bpf_linker__add_file(struct bpf_linker *linker,
const char *filename,
const struct bpf_linker_file_opts *opts);
LIBBPF_API int bpf_linker__finalize(struct bpf_linker *linker);
LIBBPF_API void bpf_linker__free(struct bpf_linker *linker);

View file

@ -359,6 +359,13 @@ LIBBPF_0.4.0 {
bpf_linker__finalize;
bpf_linker__free;
bpf_linker__new;
bpf_map__initial_value;
bpf_map__inner_map;
bpf_object__gen_loader;
bpf_object__set_kversion;
bpf_tc_attach;
bpf_tc_detach;
bpf_tc_hook_create;
bpf_tc_hook_destroy;
bpf_tc_query;
} LIBBPF_0.3.0;

View file

@ -258,6 +258,8 @@ int bpf_object__section_size(const struct bpf_object *obj, const char *name,
int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
__u32 *off);
struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf);
void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
const char **prefix, int *kind);
struct btf_ext_info {
/*

View file

@ -158,7 +158,9 @@ struct bpf_linker {
static int init_output_elf(struct bpf_linker *linker, const char *file);
static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, struct src_obj *obj);
static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
const struct bpf_linker_file_opts *opts,
struct src_obj *obj);
static int linker_sanity_check_elf(struct src_obj *obj);
static int linker_sanity_check_elf_symtab(struct src_obj *obj, struct src_sec *sec);
static int linker_sanity_check_elf_relos(struct src_obj *obj, struct src_sec *sec);
@ -435,15 +437,19 @@ static int init_output_elf(struct bpf_linker *linker, const char *file)
return 0;
}
int bpf_linker__add_file(struct bpf_linker *linker, const char *filename)
int bpf_linker__add_file(struct bpf_linker *linker, const char *filename,
const struct bpf_linker_file_opts *opts)
{
struct src_obj obj = {};
int err = 0;
if (!OPTS_VALID(opts, bpf_linker_file_opts))
return -EINVAL;
if (!linker->elf)
return -EINVAL;
err = err ?: linker_load_obj_file(linker, filename, &obj);
err = err ?: linker_load_obj_file(linker, filename, opts, &obj);
err = err ?: linker_append_sec_data(linker, &obj);
err = err ?: linker_append_elf_syms(linker, &obj);
err = err ?: linker_append_elf_relos(linker, &obj);
@ -529,7 +535,9 @@ static struct src_sec *add_src_sec(struct src_obj *obj, const char *sec_name)
return sec;
}
static int linker_load_obj_file(struct bpf_linker *linker, const char *filename, struct src_obj *obj)
static int linker_load_obj_file(struct bpf_linker *linker, const char *filename,
const struct bpf_linker_file_opts *opts,
struct src_obj *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
const int host_endianness = ELFDATA2LSB;
@ -1780,7 +1788,7 @@ static void sym_update_visibility(Elf64_Sym *sym, int sym_vis)
/* libelf doesn't provide setters for ST_VISIBILITY,
* but it is stored in the lower 2 bits of st_other
*/
sym->st_other &= 0x03;
sym->st_other &= ~0x03;
sym->st_other |= sym_vis;
}

View file

@ -4,7 +4,10 @@
#include <stdlib.h>
#include <memory.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <linux/rtnetlink.h>
#include <sys/socket.h>
#include <errno.h>
@ -73,9 +76,20 @@ static int libbpf_netlink_open(__u32 *nl_pid)
return ret;
}
static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
__dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn,
void *cookie)
static void libbpf_netlink_close(int sock)
{
close(sock);
}
enum {
	NL_CONT,	/* keep processing replies from the current recv() */
	NL_NEXT,	/* issue another recv() and continue the dump */
	NL_DONE,	/* stop; the callback got what it needed */
};
static int libbpf_netlink_recv(int sock, __u32 nl_pid, int seq,
__dump_nlmsg_t _fn, libbpf_dump_nlmsg_t fn,
void *cookie)
{
bool multipart = true;
struct nlmsgerr *err;
@ -84,6 +98,7 @@ static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
int len, ret;
while (multipart) {
start:
multipart = false;
len = recv(sock, buf, sizeof(buf), 0);
if (len < 0) {
@ -121,8 +136,16 @@ static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
}
if (_fn) {
ret = _fn(nh, fn, cookie);
if (ret)
switch (ret) {
case NL_CONT:
break;
case NL_NEXT:
goto start;
case NL_DONE:
return 0;
default:
return ret;
}
}
}
}
@ -131,74 +154,74 @@ static int bpf_netlink_recv(int sock, __u32 nl_pid, int seq,
return ret;
}
static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
__u32 flags)
static int libbpf_netlink_send_recv(struct nlmsghdr *nh,
__dump_nlmsg_t parse_msg,
libbpf_dump_nlmsg_t parse_attr,
void *cookie)
{
int sock, seq = 0, ret;
struct nlattr *nla, *nla_xdp;
struct {
struct nlmsghdr nh;
struct ifinfomsg ifinfo;
char attrbuf[64];
} req;
__u32 nl_pid = 0;
int sock, ret;
sock = libbpf_netlink_open(&nl_pid);
if (sock < 0)
return sock;
memset(&req, 0, sizeof(req));
req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
req.nh.nlmsg_type = RTM_SETLINK;
req.nh.nlmsg_pid = 0;
req.nh.nlmsg_seq = ++seq;
req.ifinfo.ifi_family = AF_UNSPEC;
req.ifinfo.ifi_index = ifindex;
nh->nlmsg_pid = 0;
nh->nlmsg_seq = time(NULL);
/* started nested attribute for XDP */
nla = (struct nlattr *)(((char *)&req)
+ NLMSG_ALIGN(req.nh.nlmsg_len));
nla->nla_type = NLA_F_NESTED | IFLA_XDP;
nla->nla_len = NLA_HDRLEN;
/* add XDP fd */
nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
nla_xdp->nla_type = IFLA_XDP_FD;
nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
nla->nla_len += nla_xdp->nla_len;
/* if user passed in any flags, add those too */
if (flags) {
nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
nla_xdp->nla_type = IFLA_XDP_FLAGS;
nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
nla->nla_len += nla_xdp->nla_len;
}
if (flags & XDP_FLAGS_REPLACE) {
nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
nla_xdp->nla_type = IFLA_XDP_EXPECTED_FD;
nla_xdp->nla_len = NLA_HDRLEN + sizeof(old_fd);
memcpy((char *)nla_xdp + NLA_HDRLEN, &old_fd, sizeof(old_fd));
nla->nla_len += nla_xdp->nla_len;
}
req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);
if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
if (send(sock, nh, nh->nlmsg_len, 0) < 0) {
ret = -errno;
goto cleanup;
goto out;
}
ret = bpf_netlink_recv(sock, nl_pid, seq, NULL, NULL, NULL);
cleanup:
close(sock);
ret = libbpf_netlink_recv(sock, nl_pid, nh->nlmsg_seq,
parse_msg, parse_attr, cookie);
out:
libbpf_netlink_close(sock);
return ret;
}
static int __bpf_set_link_xdp_fd_replace(int ifindex, int fd, int old_fd,
__u32 flags)
{
struct nlattr *nla;
int ret;
struct {
struct nlmsghdr nh;
struct ifinfomsg ifinfo;
char attrbuf[64];
} req;
memset(&req, 0, sizeof(req));
req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
req.nh.nlmsg_type = RTM_SETLINK;
req.ifinfo.ifi_family = AF_UNSPEC;
req.ifinfo.ifi_index = ifindex;
nla = nlattr_begin_nested(&req.nh, sizeof(req), IFLA_XDP);
if (!nla)
return -EMSGSIZE;
ret = nlattr_add(&req.nh, sizeof(req), IFLA_XDP_FD, &fd, sizeof(fd));
if (ret < 0)
return ret;
if (flags) {
ret = nlattr_add(&req.nh, sizeof(req), IFLA_XDP_FLAGS, &flags,
sizeof(flags));
if (ret < 0)
return ret;
}
if (flags & XDP_FLAGS_REPLACE) {
ret = nlattr_add(&req.nh, sizeof(req), IFLA_XDP_EXPECTED_FD,
&old_fd, sizeof(old_fd));
if (ret < 0)
return ret;
}
nlattr_end_nested(&req.nh, nla);
return libbpf_netlink_send_recv(&req.nh, NULL, NULL, NULL);
}
int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
const struct bpf_xdp_set_link_opts *opts)
{
@ -212,9 +235,7 @@ int bpf_set_link_xdp_fd_opts(int ifindex, int fd, __u32 flags,
flags |= XDP_FLAGS_REPLACE;
}
return __bpf_set_link_xdp_fd_replace(ifindex, fd,
old_fd,
flags);
return __bpf_set_link_xdp_fd_replace(ifindex, fd, old_fd, flags);
}
int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags)
@ -231,6 +252,7 @@ static int __dump_link_nlmsg(struct nlmsghdr *nlh,
len = nlh->nlmsg_len - NLMSG_LENGTH(sizeof(*ifi));
attr = (struct nlattr *) ((void *) ifi + NLMSG_ALIGN(sizeof(*ifi)));
if (libbpf_nla_parse(tb, IFLA_MAX, attr, len, NULL) != 0)
return -LIBBPF_ERRNO__NLPARSE;
@ -282,16 +304,21 @@ static int get_xdp_info(void *cookie, void *msg, struct nlattr **tb)
return 0;
}
static int libbpf_nl_get_link(int sock, unsigned int nl_pid,
libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie);
int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
size_t info_size, __u32 flags)
{
struct xdp_id_md xdp_id = {};
int sock, ret;
__u32 nl_pid = 0;
__u32 mask;
int ret;
struct {
struct nlmsghdr nh;
struct ifinfomsg ifm;
} req = {
.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
.nh.nlmsg_type = RTM_GETLINK,
.nh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
.ifm.ifi_family = AF_PACKET,
};
if (flags & ~XDP_FLAGS_MASK || !info_size)
return -EINVAL;
@ -302,14 +329,11 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
if (flags && flags & mask)
return -EINVAL;
sock = libbpf_netlink_open(&nl_pid);
if (sock < 0)
return sock;
xdp_id.ifindex = ifindex;
xdp_id.flags = flags;
ret = libbpf_nl_get_link(sock, nl_pid, get_xdp_info, &xdp_id);
ret = libbpf_netlink_send_recv(&req.nh, __dump_link_nlmsg,
get_xdp_info, &xdp_id);
if (!ret) {
size_t sz = min(info_size, sizeof(xdp_id.info));
@ -317,7 +341,6 @@ int bpf_get_link_xdp_info(int ifindex, struct xdp_link_info *info,
memset((void *) info + sz, 0, info_size - sz);
}
close(sock);
return ret;
}
@ -349,24 +372,403 @@ int bpf_get_link_xdp_id(int ifindex, __u32 *prog_id, __u32 flags)
return ret;
}
int libbpf_nl_get_link(int sock, unsigned int nl_pid,
libbpf_dump_nlmsg_t dump_link_nlmsg, void *cookie)
typedef int (*qdisc_config_t)(struct nlmsghdr *nh, struct tcmsg *t,
size_t maxsz);
static int clsact_config(struct nlmsghdr *nh, struct tcmsg *t, size_t maxsz)
{
struct {
struct nlmsghdr nlh;
struct ifinfomsg ifm;
} req = {
.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
.nlh.nlmsg_type = RTM_GETLINK,
.nlh.nlmsg_flags = NLM_F_DUMP | NLM_F_REQUEST,
.ifm.ifi_family = AF_PACKET,
};
int seq = time(NULL);
t->tcm_parent = TC_H_CLSACT;
t->tcm_handle = TC_H_MAKE(TC_H_CLSACT, 0);
req.nlh.nlmsg_seq = seq;
if (send(sock, &req, req.nlh.nlmsg_len, 0) < 0)
return -errno;
return bpf_netlink_recv(sock, nl_pid, seq, __dump_link_nlmsg,
dump_link_nlmsg, cookie);
return nlattr_add(nh, maxsz, TCA_KIND, "clsact", sizeof("clsact"));
}
static int attach_point_to_config(struct bpf_tc_hook *hook,
qdisc_config_t *config)
{
switch (OPTS_GET(hook, attach_point, 0)) {
case BPF_TC_INGRESS:
case BPF_TC_EGRESS:
case BPF_TC_INGRESS | BPF_TC_EGRESS:
if (OPTS_GET(hook, parent, 0))
return -EINVAL;
*config = &clsact_config;
return 0;
case BPF_TC_CUSTOM:
return -EOPNOTSUPP;
default:
return -EINVAL;
}
}
static int tc_get_tcm_parent(enum bpf_tc_attach_point attach_point,
__u32 *parent)
{
switch (attach_point) {
case BPF_TC_INGRESS:
case BPF_TC_EGRESS:
if (*parent)
return -EINVAL;
*parent = TC_H_MAKE(TC_H_CLSACT,
attach_point == BPF_TC_INGRESS ?
TC_H_MIN_INGRESS : TC_H_MIN_EGRESS);
break;
case BPF_TC_CUSTOM:
if (!*parent)
return -EINVAL;
break;
default:
return -EINVAL;
}
return 0;
}
static int tc_qdisc_modify(struct bpf_tc_hook *hook, int cmd, int flags)
{
qdisc_config_t config;
int ret;
struct {
struct nlmsghdr nh;
struct tcmsg tc;
char buf[256];
} req;
ret = attach_point_to_config(hook, &config);
if (ret < 0)
return ret;
memset(&req, 0, sizeof(req));
req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | flags;
req.nh.nlmsg_type = cmd;
req.tc.tcm_family = AF_UNSPEC;
req.tc.tcm_ifindex = OPTS_GET(hook, ifindex, 0);
ret = config(&req.nh, &req.tc, sizeof(req));
if (ret < 0)
return ret;
return libbpf_netlink_send_recv(&req.nh, NULL, NULL, NULL);
}
static int tc_qdisc_create_excl(struct bpf_tc_hook *hook)
{
return tc_qdisc_modify(hook, RTM_NEWQDISC, NLM_F_CREATE);
}
static int tc_qdisc_delete(struct bpf_tc_hook *hook)
{
return tc_qdisc_modify(hook, RTM_DELQDISC, 0);
}
int bpf_tc_hook_create(struct bpf_tc_hook *hook)
{
if (!hook || !OPTS_VALID(hook, bpf_tc_hook) ||
OPTS_GET(hook, ifindex, 0) <= 0)
return -EINVAL;
return tc_qdisc_create_excl(hook);
}
static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
const struct bpf_tc_opts *opts,
const bool flush);
int bpf_tc_hook_destroy(struct bpf_tc_hook *hook)
{
if (!hook || !OPTS_VALID(hook, bpf_tc_hook) ||
OPTS_GET(hook, ifindex, 0) <= 0)
return -EINVAL;
switch (OPTS_GET(hook, attach_point, 0)) {
case BPF_TC_INGRESS:
case BPF_TC_EGRESS:
return __bpf_tc_detach(hook, NULL, true);
case BPF_TC_INGRESS | BPF_TC_EGRESS:
return tc_qdisc_delete(hook);
case BPF_TC_CUSTOM:
return -EOPNOTSUPP;
default:
return -EINVAL;
}
}
struct bpf_cb_ctx {
struct bpf_tc_opts *opts;
bool processed;
};
static int __get_tc_info(void *cookie, struct tcmsg *tc, struct nlattr **tb,
bool unicast)
{
struct nlattr *tbb[TCA_BPF_MAX + 1];
struct bpf_cb_ctx *info = cookie;
if (!info || !info->opts)
return -EINVAL;
if (unicast && info->processed)
return -EINVAL;
if (!tb[TCA_OPTIONS])
return NL_CONT;
libbpf_nla_parse_nested(tbb, TCA_BPF_MAX, tb[TCA_OPTIONS], NULL);
if (!tbb[TCA_BPF_ID])
return -EINVAL;
OPTS_SET(info->opts, prog_id, libbpf_nla_getattr_u32(tbb[TCA_BPF_ID]));
OPTS_SET(info->opts, handle, tc->tcm_handle);
OPTS_SET(info->opts, priority, TC_H_MAJ(tc->tcm_info) >> 16);
info->processed = true;
return unicast ? NL_NEXT : NL_DONE;
}
static int get_tc_info(struct nlmsghdr *nh, libbpf_dump_nlmsg_t fn,
void *cookie)
{
struct tcmsg *tc = NLMSG_DATA(nh);
struct nlattr *tb[TCA_MAX + 1];
libbpf_nla_parse(tb, TCA_MAX,
(struct nlattr *)((char *)tc + NLMSG_ALIGN(sizeof(*tc))),
NLMSG_PAYLOAD(nh, sizeof(*tc)), NULL);
if (!tb[TCA_KIND])
return NL_CONT;
return __get_tc_info(cookie, tc, tb, nh->nlmsg_flags & NLM_F_ECHO);
}
static int tc_add_fd_and_name(struct nlmsghdr *nh, size_t maxsz, int fd)
{
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
char name[256];
int len, ret;
ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
if (ret < 0)
return ret;
ret = nlattr_add(nh, maxsz, TCA_BPF_FD, &fd, sizeof(fd));
if (ret < 0)
return ret;
len = snprintf(name, sizeof(name), "%s:[%u]", info.name, info.id);
if (len < 0)
return -errno;
if (len >= sizeof(name))
return -ENAMETOOLONG;
return nlattr_add(nh, maxsz, TCA_BPF_NAME, name, len + 1);
}
int bpf_tc_attach(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
{
__u32 protocol, bpf_flags, handle, priority, parent, prog_id, flags;
int ret, ifindex, attach_point, prog_fd;
struct bpf_cb_ctx info = {};
struct nlattr *nla;
struct {
struct nlmsghdr nh;
struct tcmsg tc;
char buf[256];
} req;
if (!hook || !opts ||
!OPTS_VALID(hook, bpf_tc_hook) ||
!OPTS_VALID(opts, bpf_tc_opts))
return -EINVAL;
ifindex = OPTS_GET(hook, ifindex, 0);
parent = OPTS_GET(hook, parent, 0);
attach_point = OPTS_GET(hook, attach_point, 0);
handle = OPTS_GET(opts, handle, 0);
priority = OPTS_GET(opts, priority, 0);
prog_fd = OPTS_GET(opts, prog_fd, 0);
prog_id = OPTS_GET(opts, prog_id, 0);
flags = OPTS_GET(opts, flags, 0);
if (ifindex <= 0 || !prog_fd || prog_id)
return -EINVAL;
if (priority > UINT16_MAX)
return -EINVAL;
if (flags & ~BPF_TC_F_REPLACE)
return -EINVAL;
flags = (flags & BPF_TC_F_REPLACE) ? NLM_F_REPLACE : NLM_F_EXCL;
protocol = ETH_P_ALL;
memset(&req, 0, sizeof(req));
req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE |
NLM_F_ECHO | flags;
req.nh.nlmsg_type = RTM_NEWTFILTER;
req.tc.tcm_family = AF_UNSPEC;
req.tc.tcm_ifindex = ifindex;
req.tc.tcm_handle = handle;
req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol));
ret = tc_get_tcm_parent(attach_point, &parent);
if (ret < 0)
return ret;
req.tc.tcm_parent = parent;
ret = nlattr_add(&req.nh, sizeof(req), TCA_KIND, "bpf", sizeof("bpf"));
if (ret < 0)
return ret;
nla = nlattr_begin_nested(&req.nh, sizeof(req), TCA_OPTIONS);
if (!nla)
return -EMSGSIZE;
ret = tc_add_fd_and_name(&req.nh, sizeof(req), prog_fd);
if (ret < 0)
return ret;
bpf_flags = TCA_BPF_FLAG_ACT_DIRECT;
ret = nlattr_add(&req.nh, sizeof(req), TCA_BPF_FLAGS, &bpf_flags,
sizeof(bpf_flags));
if (ret < 0)
return ret;
nlattr_end_nested(&req.nh, nla);
info.opts = opts;
ret = libbpf_netlink_send_recv(&req.nh, get_tc_info, NULL, &info);
if (ret < 0)
return ret;
if (!info.processed)
return -ENOENT;
return ret;
}
static int __bpf_tc_detach(const struct bpf_tc_hook *hook,
const struct bpf_tc_opts *opts,
const bool flush)
{
__u32 protocol = 0, handle, priority, parent, prog_id, flags;
int ret, ifindex, attach_point, prog_fd;
struct {
struct nlmsghdr nh;
struct tcmsg tc;
char buf[256];
} req;
if (!hook ||
!OPTS_VALID(hook, bpf_tc_hook) ||
!OPTS_VALID(opts, bpf_tc_opts))
return -EINVAL;
ifindex = OPTS_GET(hook, ifindex, 0);
parent = OPTS_GET(hook, parent, 0);
attach_point = OPTS_GET(hook, attach_point, 0);
handle = OPTS_GET(opts, handle, 0);
priority = OPTS_GET(opts, priority, 0);
prog_fd = OPTS_GET(opts, prog_fd, 0);
prog_id = OPTS_GET(opts, prog_id, 0);
flags = OPTS_GET(opts, flags, 0);
if (ifindex <= 0 || flags || prog_fd || prog_id)
return -EINVAL;
if (priority > UINT16_MAX)
return -EINVAL;
if (flags & ~BPF_TC_F_REPLACE)
return -EINVAL;
if (!flush) {
if (!handle || !priority)
return -EINVAL;
protocol = ETH_P_ALL;
} else {
if (handle || priority)
return -EINVAL;
}
memset(&req, 0, sizeof(req));
req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
req.nh.nlmsg_type = RTM_DELTFILTER;
req.tc.tcm_family = AF_UNSPEC;
req.tc.tcm_ifindex = ifindex;
if (!flush) {
req.tc.tcm_handle = handle;
req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol));
}
ret = tc_get_tcm_parent(attach_point, &parent);
if (ret < 0)
return ret;
req.tc.tcm_parent = parent;
if (!flush) {
ret = nlattr_add(&req.nh, sizeof(req), TCA_KIND,
"bpf", sizeof("bpf"));
if (ret < 0)
return ret;
}
return libbpf_netlink_send_recv(&req.nh, NULL, NULL, NULL);
}
int bpf_tc_detach(const struct bpf_tc_hook *hook,
const struct bpf_tc_opts *opts)
{
return !opts ? -EINVAL : __bpf_tc_detach(hook, opts, false);
}
int bpf_tc_query(const struct bpf_tc_hook *hook, struct bpf_tc_opts *opts)
{
__u32 protocol, handle, priority, parent, prog_id, flags;
int ret, ifindex, attach_point, prog_fd;
struct bpf_cb_ctx info = {};
struct {
struct nlmsghdr nh;
struct tcmsg tc;
char buf[256];
} req;
if (!hook || !opts ||
!OPTS_VALID(hook, bpf_tc_hook) ||
!OPTS_VALID(opts, bpf_tc_opts))
return -EINVAL;
ifindex = OPTS_GET(hook, ifindex, 0);
parent = OPTS_GET(hook, parent, 0);
attach_point = OPTS_GET(hook, attach_point, 0);
handle = OPTS_GET(opts, handle, 0);
priority = OPTS_GET(opts, priority, 0);
prog_fd = OPTS_GET(opts, prog_fd, 0);
prog_id = OPTS_GET(opts, prog_id, 0);
flags = OPTS_GET(opts, flags, 0);
if (ifindex <= 0 || flags || prog_fd || prog_id ||
!handle || !priority)
return -EINVAL;
if (priority > UINT16_MAX)
return -EINVAL;
protocol = ETH_P_ALL;
memset(&req, 0, sizeof(req));
req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg));
req.nh.nlmsg_flags = NLM_F_REQUEST;
req.nh.nlmsg_type = RTM_GETTFILTER;
req.tc.tcm_family = AF_UNSPEC;
req.tc.tcm_ifindex = ifindex;
req.tc.tcm_handle = handle;
req.tc.tcm_info = TC_H_MAKE(priority << 16, htons(protocol));
ret = tc_get_tcm_parent(attach_point, &parent);
if (ret < 0)
return ret;
req.tc.tcm_parent = parent;
ret = nlattr_add(&req.nh, sizeof(req), TCA_KIND, "bpf", sizeof("bpf"));
if (ret < 0)
return ret;
info.opts = opts;
ret = libbpf_netlink_send_recv(&req.nh, get_tc_info, NULL, &info);
if (ret < 0)
return ret;
if (!info.processed)
return -ENOENT;
return ret;
}

View file

@ -10,7 +10,10 @@
#define __LIBBPF_NLATTR_H
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <linux/netlink.h>
/* avoid multiple definition of netlink features */
#define __LINUX_NETLINK_H
@ -103,4 +106,49 @@ int libbpf_nla_parse_nested(struct nlattr *tb[], int maxtype,
int libbpf_nla_dump_errormsg(struct nlmsghdr *nlh);
static inline struct nlattr *nla_data(struct nlattr *nla)
{
return (struct nlattr *)((char *)nla + NLA_HDRLEN);
}
static inline struct nlattr *nh_tail(struct nlmsghdr *nh)
{
return (struct nlattr *)((char *)nh + NLMSG_ALIGN(nh->nlmsg_len));
}
static inline int nlattr_add(struct nlmsghdr *nh, size_t maxsz, int type,
const void *data, int len)
{
struct nlattr *nla;
if (NLMSG_ALIGN(nh->nlmsg_len) + NLA_ALIGN(NLA_HDRLEN + len) > maxsz)
return -EMSGSIZE;
if (!!data != !!len)
return -EINVAL;
nla = nh_tail(nh);
nla->nla_type = type;
nla->nla_len = NLA_HDRLEN + len;
if (data)
memcpy(nla_data(nla), data, len);
nh->nlmsg_len = NLMSG_ALIGN(nh->nlmsg_len) + NLA_ALIGN(nla->nla_len);
return 0;
}
static inline struct nlattr *nlattr_begin_nested(struct nlmsghdr *nh,
size_t maxsz, int type)
{
struct nlattr *tail;
tail = nh_tail(nh);
if (nlattr_add(nh, maxsz, type | NLA_F_NESTED, NULL, 0))
return NULL;
return tail;
}
static inline void nlattr_end_nested(struct nlmsghdr *nh, struct nlattr *tail)
{
tail->nla_len = (char *)nh_tail(nh) - (char *)tail;
}
#endif /* __LIBBPF_NLATTR_H */
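
These helpers are what the rewritten netlink.c above uses for every request. A condensed, illustrative sketch of the begin/add/end pattern for a nested attribute (types and attribute ids taken from the XDP code above, not from this header):

#include <errno.h>
#include <linux/if_link.h>
#include "nlattr.h"	/* internal libbpf header shown above */

/* Illustrative only: append a nested IFLA_XDP attribute carrying an fd to
 * an already-initialized netlink request header.
 */
static int append_xdp_nest(struct nlmsghdr *nh, size_t maxsz, int xdp_fd)
{
	struct nlattr *nest;
	int err;

	nest = nlattr_begin_nested(nh, maxsz, IFLA_XDP);
	if (!nest)
		return -EMSGSIZE;
	err = nlattr_add(nh, maxsz, IFLA_XDP_FD, &xdp_fd, sizeof(xdp_fd));
	if (err < 0)
		return err;
	nlattr_end_nested(nh, nest);	/* patch the nest's total nla_len */
	return 0;
}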

View file

@ -0,0 +1,123 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
/* Copyright (c) 2021 Facebook */
#ifndef __SKEL_INTERNAL_H
#define __SKEL_INTERNAL_H
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/mman.h>
/* This file is a base header for auto-generated *.lskel.h files.
* Its contents will change and may become part of auto-generation in the future.
*
 * The layout of bpf_[map|prog]_desc and bpf_loader_ctx is feature dependent
 * and will change from one version of libbpf to another, depending on the
 * features requested during loader program generation.
*/
struct bpf_map_desc {
union {
/* input for the loader prog */
struct {
__aligned_u64 initial_value;
__u32 max_entries;
};
/* output of the loader prog */
struct {
int map_fd;
};
};
};
struct bpf_prog_desc {
int prog_fd;
};
struct bpf_loader_ctx {
size_t sz;
__u32 log_level;
__u32 log_size;
__u64 log_buf;
};
struct bpf_load_and_run_opts {
struct bpf_loader_ctx *ctx;
const void *data;
const void *insns;
__u32 data_sz;
__u32 insns_sz;
const char *errstr;
};
static inline int skel_sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr,
unsigned int size)
{
return syscall(__NR_bpf, cmd, attr, size);
}
static inline int skel_closenz(int fd)
{
if (fd > 0)
return close(fd);
return -EINVAL;
}
static inline int bpf_load_and_run(struct bpf_load_and_run_opts *opts)
{
int map_fd = -1, prog_fd = -1, key = 0, err;
union bpf_attr attr;
map_fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "__loader.map", 4,
opts->data_sz, 1, 0);
if (map_fd < 0) {
opts->errstr = "failed to create loader map";
err = -errno;
goto out;
}
err = bpf_map_update_elem(map_fd, &key, opts->data, 0);
if (err < 0) {
opts->errstr = "failed to update loader map";
err = -errno;
goto out;
}
memset(&attr, 0, sizeof(attr));
attr.prog_type = BPF_PROG_TYPE_SYSCALL;
attr.insns = (long) opts->insns;
attr.insn_cnt = opts->insns_sz / sizeof(struct bpf_insn);
attr.license = (long) "Dual BSD/GPL";
memcpy(attr.prog_name, "__loader.prog", sizeof("__loader.prog"));
attr.fd_array = (long) &map_fd;
attr.log_level = opts->ctx->log_level;
attr.log_size = opts->ctx->log_size;
attr.log_buf = opts->ctx->log_buf;
attr.prog_flags = BPF_F_SLEEPABLE;
prog_fd = skel_sys_bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
if (prog_fd < 0) {
opts->errstr = "failed to load loader prog";
err = -errno;
goto out;
}
memset(&attr, 0, sizeof(attr));
attr.test.prog_fd = prog_fd;
attr.test.ctx_in = (long) opts->ctx;
attr.test.ctx_size_in = opts->ctx->sz;
err = skel_sys_bpf(BPF_PROG_RUN, &attr, sizeof(attr));
if (err < 0 || (int)attr.test.retval < 0) {
opts->errstr = "failed to execute loader prog";
if (err < 0)
err = -errno;
else
err = (int)attr.test.retval;
goto out;
}
err = 0;
out:
if (map_fd >= 0)
close(map_fd);
if (prog_fd >= 0)
close(prog_fd);
return err;
}
#endif
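
A generated *.lskel.h is expected to include this header, embed the blobs produced by bpf_object__gen_loader(), and call bpf_load_and_run(). The following is a hand-written sketch of that shape (real files are emitted by "bpftool gen skeleton -L"; here the blobs are passed in as parameters instead of being embedded):

#include <bpf/bpf.h>
#include <bpf/skel_internal.h>	/* assumed install path of this header */

/* Hypothetical sketch of the load step a light skeleton performs: the
 * skeleton struct doubles as struct bpf_loader_ctx, and the embedded
 * data/insns blobs are handed to bpf_load_and_run().
 */
static inline int example_lskel__load(struct bpf_loader_ctx *ctx,
				      const void *data, __u32 data_sz,
				      const void *insns, __u32 insns_sz)
{
	struct bpf_load_and_run_opts opts = {
		.ctx = ctx,
		.data = data,
		.data_sz = data_sz,
		.insns = insns,
		.insns_sz = insns_sz,
	};

	return bpf_load_and_run(&opts);	/* runs the loader prog via BPF_PROG_RUN */
}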

View file

@ -30,6 +30,7 @@ test_sysctl
xdping
test_cpp
*.skel.h
*.lskel.h
/no_alu32
/bpf_gcc
/tools

View file

@ -312,6 +312,10 @@ SKEL_BLACKLIST := btf__% test_pinning_invalid.c test_sk_assign.c
LINKED_SKELS := test_static_linked.skel.h linked_funcs.skel.h \
linked_vars.skel.h linked_maps.skel.h
LSKELS := kfunc_call_test.c fentry_test.c fexit_test.c fexit_sleep.c \
test_ksyms_module.c test_ringbuf.c atomics.c trace_printk.c
SKEL_BLACKLIST += $$(LSKELS)
test_static_linked.skel.h-deps := test_static_linked1.o test_static_linked2.o
linked_funcs.skel.h-deps := linked_funcs1.o linked_funcs2.o
linked_vars.skel.h-deps := linked_vars1.o linked_vars2.o
@ -339,6 +343,7 @@ TRUNNER_BPF_OBJS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.o, $$(TRUNNER_BPF_SRCS)
TRUNNER_BPF_SKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.skel.h, \
$$(filter-out $(SKEL_BLACKLIST) $(LINKED_BPF_SRCS),\
$$(TRUNNER_BPF_SRCS)))
TRUNNER_BPF_LSKELS := $$(patsubst %.c,$$(TRUNNER_OUTPUT)/%.lskel.h, $$(LSKELS))
TRUNNER_BPF_SKELS_LINKED := $$(addprefix $$(TRUNNER_OUTPUT)/,$(LINKED_SKELS))
TEST_GEN_FILES += $$(TRUNNER_BPF_OBJS)
@ -380,6 +385,14 @@ $(TRUNNER_BPF_SKELS): %.skel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(Q)$$(BPFTOOL) gen skeleton $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@
$(TRUNNER_BPF_LSKELS): %.lskel.h: %.o $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,GEN-SKEL,$(TRUNNER_BINARY),$$@)
$(Q)$$(BPFTOOL) gen object $$(<:.o=.linked1.o) $$<
$(Q)$$(BPFTOOL) gen object $$(<:.o=.linked2.o) $$(<:.o=.linked1.o)
$(Q)$$(BPFTOOL) gen object $$(<:.o=.linked3.o) $$(<:.o=.linked2.o)
$(Q)diff $$(<:.o=.linked2.o) $$(<:.o=.linked3.o)
$(Q)$$(BPFTOOL) gen skeleton -L $$(<:.o=.linked3.o) name $$(notdir $$(<:.o=)) > $$@
$(TRUNNER_BPF_SKELS_LINKED): $(TRUNNER_BPF_OBJS) $(BPFTOOL) | $(TRUNNER_OUTPUT)
$$(call msg,LINK-BPF,$(TRUNNER_BINARY),$$(@:.skel.h=.o))
$(Q)$$(BPFTOOL) gen object $$(@:.skel.h=.linked1.o) $$(addprefix $(TRUNNER_OUTPUT)/,$$($$(@F)-deps))
@ -409,6 +422,7 @@ $(TRUNNER_TEST_OBJS): $(TRUNNER_OUTPUT)/%.test.o: \
$(TRUNNER_EXTRA_HDRS) \
$(TRUNNER_BPF_OBJS) \
$(TRUNNER_BPF_SKELS) \
$(TRUNNER_BPF_LSKELS) \
$(TRUNNER_BPF_SKELS_LINKED) \
$$(BPFOBJ) | $(TRUNNER_OUTPUT)
$$(call msg,TEST-OBJ,$(TRUNNER_BINARY),$$@)
@ -516,6 +530,6 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature \
$(addprefix $(OUTPUT)/,*.o *.skel.h no_alu32 bpf_gcc bpf_testmod.ko)
$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h no_alu32 bpf_gcc bpf_testmod.ko)
.PHONY: docs docs-clean

View file

@ -2,19 +2,19 @@
#include <test_progs.h>
#include "atomics.skel.h"
#include "atomics.lskel.h"
static void test_add(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
struct bpf_link *link;
int link_fd;
link = bpf_program__attach(skel->progs.add);
if (CHECK(IS_ERR(link), "attach(add)", "err: %ld\n", PTR_ERR(link)))
link_fd = atomics__add__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(add)"))
return;
prog_fd = bpf_program__fd(skel->progs.add);
prog_fd = skel->progs.add.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run add",
@ -33,20 +33,20 @@ static void test_add(struct atomics *skel)
ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");
cleanup:
bpf_link__destroy(link);
close(link_fd);
}
static void test_sub(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
struct bpf_link *link;
int link_fd;
link = bpf_program__attach(skel->progs.sub);
if (CHECK(IS_ERR(link), "attach(sub)", "err: %ld\n", PTR_ERR(link)))
link_fd = atomics__sub__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(sub)"))
return;
prog_fd = bpf_program__fd(skel->progs.sub);
prog_fd = skel->progs.sub.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run sub",
@ -66,20 +66,20 @@ static void test_sub(struct atomics *skel)
ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");
cleanup:
bpf_link__destroy(link);
close(link_fd);
}
static void test_and(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
struct bpf_link *link;
int link_fd;
link = bpf_program__attach(skel->progs.and);
if (CHECK(IS_ERR(link), "attach(and)", "err: %ld\n", PTR_ERR(link)))
link_fd = atomics__and__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(and)"))
return;
prog_fd = bpf_program__fd(skel->progs.and);
prog_fd = skel->progs.and.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run and",
@ -94,20 +94,20 @@ static void test_and(struct atomics *skel)
ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");
cleanup:
bpf_link__destroy(link);
close(link_fd);
}
static void test_or(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
struct bpf_link *link;
int link_fd;
link = bpf_program__attach(skel->progs.or);
if (CHECK(IS_ERR(link), "attach(or)", "err: %ld\n", PTR_ERR(link)))
link_fd = atomics__or__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(or)"))
return;
prog_fd = bpf_program__fd(skel->progs.or);
prog_fd = skel->progs.or.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run or",
@ -123,20 +123,20 @@ static void test_or(struct atomics *skel)
ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");
cleanup:
bpf_link__destroy(link);
close(link_fd);
}
static void test_xor(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
struct bpf_link *link;
int link_fd;
link = bpf_program__attach(skel->progs.xor);
if (CHECK(IS_ERR(link), "attach(xor)", "err: %ld\n", PTR_ERR(link)))
link_fd = atomics__xor__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(xor)"))
return;
prog_fd = bpf_program__fd(skel->progs.xor);
prog_fd = skel->progs.xor.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run xor",
@ -151,20 +151,20 @@ static void test_xor(struct atomics *skel)
ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_noreturn_value");
cleanup:
bpf_link__destroy(link);
close(link_fd);
}
static void test_cmpxchg(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
struct bpf_link *link;
int link_fd;
link = bpf_program__attach(skel->progs.cmpxchg);
if (CHECK(IS_ERR(link), "attach(cmpxchg)", "err: %ld\n", PTR_ERR(link)))
link_fd = atomics__cmpxchg__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)"))
return;
prog_fd = bpf_program__fd(skel->progs.cmpxchg);
prog_fd = skel->progs.cmpxchg.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run add",
@ -180,20 +180,20 @@ static void test_cmpxchg(struct atomics *skel)
ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed");
cleanup:
bpf_link__destroy(link);
close(link_fd);
}
static void test_xchg(struct atomics *skel)
{
int err, prog_fd;
__u32 duration = 0, retval;
struct bpf_link *link;
int link_fd;
link = bpf_program__attach(skel->progs.xchg);
if (CHECK(IS_ERR(link), "attach(xchg)", "err: %ld\n", PTR_ERR(link)))
link_fd = atomics__xchg__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(xchg)"))
return;
prog_fd = bpf_program__fd(skel->progs.xchg);
prog_fd = skel->progs.xchg.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
if (CHECK(err || retval, "test_run add",
@ -207,7 +207,7 @@ static void test_xchg(struct atomics *skel)
ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
cleanup:
bpf_link__destroy(link);
close(link_fd);
}
void test_atomics(void)

View file

@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include "fentry_test.skel.h"
#include "fexit_test.skel.h"
#include "fentry_test.lskel.h"
#include "fexit_test.lskel.h"
void test_fentry_fexit(void)
{
@ -26,7 +26,7 @@ void test_fentry_fexit(void)
if (CHECK(err, "fexit_attach", "fexit attach failed: %d\n", err))
goto close_prog;
prog_fd = bpf_program__fd(fexit_skel->progs.test1);
prog_fd = fexit_skel->progs.test1.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
CHECK(err || retval, "ipv6",

View file

@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include "fentry_test.skel.h"
#include "fentry_test.lskel.h"
static int fentry_test(struct fentry_test *fentry_skel)
{
int err, prog_fd, i;
__u32 duration = 0, retval;
struct bpf_link *link;
int link_fd;
__u64 *result;
err = fentry_test__attach(fentry_skel);
@ -15,11 +15,11 @@ static int fentry_test(struct fentry_test *fentry_skel)
return err;
/* Check that already linked program can't be attached again. */
link = bpf_program__attach(fentry_skel->progs.test1);
if (!ASSERT_ERR_PTR(link, "fentry_attach_link"))
link_fd = fentry_test__test1__attach(fentry_skel);
if (!ASSERT_LT(link_fd, 0, "fentry_attach_link"))
return -1;
prog_fd = bpf_program__fd(fentry_skel->progs.test1);
prog_fd = fentry_skel->progs.test1.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
ASSERT_OK(err, "test_run");

View file

@ -6,7 +6,7 @@
#include <time.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include "fexit_sleep.skel.h"
#include "fexit_sleep.lskel.h"
static int do_sleep(void *skel)
{
@ -58,8 +58,8 @@ void test_fexit_sleep(void)
* waiting for percpu_ref_kill to confirm). The other one
* will be freed quickly.
*/
close(bpf_program__fd(fexit_skel->progs.nanosleep_fentry));
close(bpf_program__fd(fexit_skel->progs.nanosleep_fexit));
close(fexit_skel->progs.nanosleep_fentry.prog_fd);
close(fexit_skel->progs.nanosleep_fexit.prog_fd);
fexit_sleep__detach(fexit_skel);
/* kill the thread to unwind sys_nanosleep stack through the trampoline */

View file

@ -1,13 +1,13 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <test_progs.h>
#include "fexit_test.skel.h"
#include "fexit_test.lskel.h"
static int fexit_test(struct fexit_test *fexit_skel)
{
int err, prog_fd, i;
__u32 duration = 0, retval;
struct bpf_link *link;
int link_fd;
__u64 *result;
err = fexit_test__attach(fexit_skel);
@ -15,11 +15,11 @@ static int fexit_test(struct fexit_test *fexit_skel)
return err;
/* Check that already linked program can't be attached again. */
link = bpf_program__attach(fexit_skel->progs.test1);
if (!ASSERT_ERR_PTR(link, "fexit_attach_link"))
link_fd = fexit_test__test1__attach(fexit_skel);
if (!ASSERT_LT(link_fd, 0, "fexit_attach_link"))
return -1;
prog_fd = bpf_program__fd(fexit_skel->progs.test1);
prog_fd = fexit_skel->progs.test1.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
NULL, NULL, &retval, &duration);
ASSERT_OK(err, "test_run");

View file

@ -2,7 +2,7 @@
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "kfunc_call_test.skel.h"
#include "kfunc_call_test.lskel.h"
#include "kfunc_call_test_subprog.skel.h"
static void test_main(void)
@ -14,13 +14,13 @@ static void test_main(void)
if (!ASSERT_OK_PTR(skel, "skel"))
return;
prog_fd = bpf_program__fd(skel->progs.kfunc_call_test1);
prog_fd = skel->progs.kfunc_call_test1.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
NULL, NULL, (__u32 *)&retval, NULL);
ASSERT_OK(err, "bpf_prog_test_run(test1)");
ASSERT_EQ(retval, 12, "test1-retval");
prog_fd = bpf_program__fd(skel->progs.kfunc_call_test2);
prog_fd = skel->progs.kfunc_call_test2.prog_fd;
err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4),
NULL, NULL, (__u32 *)&retval, NULL);
ASSERT_OK(err, "bpf_prog_test_run(test2)");

View file

@ -4,7 +4,7 @@
#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include "test_ksyms_module.skel.h"
#include "test_ksyms_module.lskel.h"
static int duration;

View file

@ -12,7 +12,7 @@
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include "test_ringbuf.skel.h"
#include "test_ringbuf.lskel.h"
#define EDONE 7777
@ -93,9 +93,7 @@ void test_ringbuf(void)
if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
return;
err = bpf_map__set_max_entries(skel->maps.ringbuf, page_size);
if (CHECK(err != 0, "bpf_map__set_max_entries", "bpf_map__set_max_entries failed\n"))
goto cleanup;
skel->maps.ringbuf.max_entries = page_size;
err = test_ringbuf__load(skel);
if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
@ -104,7 +102,7 @@ void test_ringbuf(void)
/* only trigger BPF program for current process */
skel->bss->pid = getpid();
ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf),
ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
process_sample, NULL, NULL);
if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
goto cleanup;

View file

@ -2,7 +2,7 @@
#include <test_progs.h>
#include "test_send_signal_kern.skel.h"
static volatile int sigusr1_received = 0;
int sigusr1_received = 0;
static void sigusr1_handler(int signum)
{

View file

@ -82,10 +82,8 @@ void test_skeleton(void)
CHECK(data->out2 != 2, "res2", "got %lld != exp %d\n", data->out2, 2);
CHECK(bss->out3 != 3, "res3", "got %d != exp %d\n", (int)bss->out3, 3);
CHECK(bss->out4 != 4, "res4", "got %lld != exp %d\n", bss->out4, 4);
CHECK(bss->handler_out5.a != 5, "res5", "got %d != exp %d\n",
bss->handler_out5.a, 5);
CHECK(bss->handler_out5.b != 6, "res6", "got %lld != exp %d\n",
bss->handler_out5.b, 6);
CHECK(bss->out5.a != 5, "res5", "got %d != exp %d\n", bss->out5.a, 5);
CHECK(bss->out5.b != 6, "res6", "got %lld != exp %d\n", bss->out5.b, 6);
CHECK(bss->out6 != 14, "res7", "got %d != exp %d\n", bss->out6, 14);
CHECK(bss->bpf_syscall != kcfg->CONFIG_BPF_SYSCALL, "ext1",

View file

@ -14,12 +14,7 @@ void test_static_linked(void)
return;
skel->rodata->rovar1 = 1;
skel->bss->static_var1 = 2;
skel->bss->static_var11 = 3;
skel->rodata->rovar2 = 4;
skel->bss->static_var2 = 5;
skel->bss->static_var22 = 6;
err = test_static_linked__load(skel);
if (!ASSERT_OK(err, "skel_load"))
@ -32,8 +27,8 @@ void test_static_linked(void)
/* trigger */
usleep(1);
ASSERT_EQ(skel->bss->var1, 1 * 2 + 2 + 3, "var1");
ASSERT_EQ(skel->bss->var2, 4 * 3 + 5 + 6, "var2");
ASSERT_EQ(skel->data->var1, 1 * 2 + 2 + 3, "var1");
ASSERT_EQ(skel->data->var2, 4 * 3 + 5 + 6, "var2");
cleanup:
test_static_linked__destroy(skel);

View file

@ -0,0 +1,55 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "syscall.skel.h"
struct args {
__u64 log_buf;
__u32 log_size;
int max_entries;
int map_fd;
int prog_fd;
int btf_fd;
};
void test_syscall(void)
{
static char verifier_log[8192];
struct args ctx = {
.max_entries = 1024,
.log_buf = (uintptr_t) verifier_log,
.log_size = sizeof(verifier_log),
};
struct bpf_prog_test_run_attr tattr = {
.ctx_in = &ctx,
.ctx_size_in = sizeof(ctx),
};
struct syscall *skel = NULL;
__u64 key = 12, value = 0;
int err;
skel = syscall__open_and_load();
if (!ASSERT_OK_PTR(skel, "skel_load"))
goto cleanup;
tattr.prog_fd = bpf_program__fd(skel->progs.bpf_prog);
err = bpf_prog_test_run_xattr(&tattr);
ASSERT_EQ(err, 0, "err");
ASSERT_EQ(tattr.retval, 1, "retval");
ASSERT_GT(ctx.map_fd, 0, "ctx.map_fd");
ASSERT_GT(ctx.prog_fd, 0, "ctx.prog_fd");
ASSERT_OK(memcmp(verifier_log, "processed", sizeof("processed") - 1),
"verifier_log");
err = bpf_map_lookup_elem(ctx.map_fd, &key, &value);
ASSERT_EQ(err, 0, "map_lookup");
ASSERT_EQ(value, 34, "map lookup value");
cleanup:
syscall__destroy(skel);
if (ctx.prog_fd > 0)
close(ctx.prog_fd);
if (ctx.map_fd > 0)
close(ctx.map_fd);
if (ctx.btf_fd > 0)
close(ctx.btf_fd);
}
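
The BPF side of this test is a sleepable SEC("syscall") program (progs/syscall.c in this series, generated into syscall.skel.h above). A stripped-down, hypothetical sketch of that program shape, showing how the ctx_in buffer is received, could look like the following; the real program additionally creates the map, BTF and program whose fds the test checks:

// SPDX-License-Identifier: GPL-2.0
/* Hypothetical minimal BPF_PROG_TYPE_SYSCALL program; illustrative only. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct args {
	__u64 log_buf;
	__u32 log_size;
	int max_entries;
	int map_fd;
	int prog_fd;
	int btf_fd;
};

SEC("syscall")
int bpf_prog(struct args *ctx)
{
	/* ctx is the user buffer passed through bpf_prog_test_run ctx_in */
	ctx->max_entries &= 0xffff;	/* placeholder work on the context */
	return 1;			/* becomes tattr.retval in the test above */
}

char _license[] SEC("license") = "GPL";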

View file

@ -0,0 +1,395 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <linux/pkt_cls.h>
#include "test_tc_bpf.skel.h"
#define LO_IFINDEX 1
#define TEST_DECLARE_OPTS(__fd) \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_h, .handle = 1); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_p, .priority = 1); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_f, .prog_fd = __fd); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hp, .handle = 1, .priority = 1); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hf, .handle = 1, .prog_fd = __fd); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_pf, .priority = 1, .prog_fd = __fd); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpf, .handle = 1, .priority = 1, .prog_fd = __fd); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpi, .handle = 1, .priority = 1, .prog_id = 42); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpr, .handle = 1, .priority = 1, \
.flags = BPF_TC_F_REPLACE); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_hpfi, .handle = 1, .priority = 1, .prog_fd = __fd, \
.prog_id = 42); \
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts_prio_max, .handle = 1, .priority = UINT16_MAX + 1);
static int test_tc_bpf_basic(const struct bpf_tc_hook *hook, int fd)
{
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, .prog_fd = fd);
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
int ret;
ret = bpf_obj_get_info_by_fd(fd, &info, &info_len);
if (!ASSERT_OK(ret, "bpf_obj_get_info_by_fd"))
return ret;
ret = bpf_tc_attach(hook, &opts);
if (!ASSERT_OK(ret, "bpf_tc_attach"))
return ret;
if (!ASSERT_EQ(opts.handle, 1, "handle set") ||
!ASSERT_EQ(opts.priority, 1, "priority set") ||
!ASSERT_EQ(opts.prog_id, info.id, "prog_id set"))
goto end;
opts.prog_id = 0;
opts.flags = BPF_TC_F_REPLACE;
ret = bpf_tc_attach(hook, &opts);
if (!ASSERT_OK(ret, "bpf_tc_attach replace mode"))
goto end;
opts.flags = opts.prog_fd = opts.prog_id = 0;
ret = bpf_tc_query(hook, &opts);
if (!ASSERT_OK(ret, "bpf_tc_query"))
goto end;
if (!ASSERT_EQ(opts.handle, 1, "handle set") ||
!ASSERT_EQ(opts.priority, 1, "priority set") ||
!ASSERT_EQ(opts.prog_id, info.id, "prog_id set"))
goto end;
end:
opts.flags = opts.prog_fd = opts.prog_id = 0;
ret = bpf_tc_detach(hook, &opts);
ASSERT_OK(ret, "bpf_tc_detach");
return ret;
}
static int test_tc_bpf_api(struct bpf_tc_hook *hook, int fd)
{
DECLARE_LIBBPF_OPTS(bpf_tc_opts, attach_opts, .handle = 1, .priority = 1, .prog_fd = fd);
DECLARE_LIBBPF_OPTS(bpf_tc_hook, inv_hook, .attach_point = BPF_TC_INGRESS);
DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1);
int ret;
ret = bpf_tc_hook_create(NULL);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook = NULL"))
return -EINVAL;
/* hook ifindex = 0 */
ret = bpf_tc_hook_create(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook ifindex == 0"))
return -EINVAL;
ret = bpf_tc_hook_destroy(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook ifindex == 0"))
return -EINVAL;
ret = bpf_tc_attach(&inv_hook, &attach_opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook ifindex == 0"))
return -EINVAL;
attach_opts.prog_id = 0;
ret = bpf_tc_detach(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook ifindex == 0"))
return -EINVAL;
ret = bpf_tc_query(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook ifindex == 0"))
return -EINVAL;
/* hook ifindex < 0 */
inv_hook.ifindex = -1;
ret = bpf_tc_hook_create(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook ifindex < 0"))
return -EINVAL;
ret = bpf_tc_hook_destroy(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook ifindex < 0"))
return -EINVAL;
ret = bpf_tc_attach(&inv_hook, &attach_opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook ifindex < 0"))
return -EINVAL;
attach_opts.prog_id = 0;
ret = bpf_tc_detach(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook ifindex < 0"))
return -EINVAL;
ret = bpf_tc_query(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook ifindex < 0"))
return -EINVAL;
inv_hook.ifindex = LO_IFINDEX;
/* hook.attach_point invalid */
inv_hook.attach_point = 0xabcd;
ret = bpf_tc_hook_create(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook.attach_point"))
return -EINVAL;
ret = bpf_tc_hook_destroy(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook.attach_point"))
return -EINVAL;
ret = bpf_tc_attach(&inv_hook, &attach_opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook.attach_point"))
return -EINVAL;
ret = bpf_tc_detach(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook.attach_point"))
return -EINVAL;
ret = bpf_tc_query(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook.attach_point"))
return -EINVAL;
inv_hook.attach_point = BPF_TC_INGRESS;
/* hook.attach_point valid, but parent invalid */
inv_hook.parent = TC_H_MAKE(1UL << 16, 10);
ret = bpf_tc_hook_create(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_create invalid hook parent"))
return -EINVAL;
ret = bpf_tc_hook_destroy(&inv_hook);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_hook_destroy invalid hook parent"))
return -EINVAL;
ret = bpf_tc_attach(&inv_hook, &attach_opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook parent"))
return -EINVAL;
ret = bpf_tc_detach(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook parent"))
return -EINVAL;
ret = bpf_tc_query(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook parent"))
return -EINVAL;
inv_hook.attach_point = BPF_TC_CUSTOM;
inv_hook.parent = 0;
/* These return EOPNOTSUPP instead of EINVAL because the parent is
 * checked after the hook's attach_point.
 */
ret = bpf_tc_hook_create(&inv_hook);
if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_create invalid hook parent"))
return -EINVAL;
ret = bpf_tc_hook_destroy(&inv_hook);
if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_destroy invalid hook parent"))
return -EINVAL;
ret = bpf_tc_attach(&inv_hook, &attach_opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook parent"))
return -EINVAL;
ret = bpf_tc_detach(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook parent"))
return -EINVAL;
ret = bpf_tc_query(&inv_hook, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook parent"))
return -EINVAL;
inv_hook.attach_point = BPF_TC_INGRESS;
/* detach */
{
TEST_DECLARE_OPTS(fd);
ret = bpf_tc_detach(NULL, &opts_hp);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid hook = NULL"))
return -EINVAL;
ret = bpf_tc_detach(hook, NULL);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid opts = NULL"))
return -EINVAL;
ret = bpf_tc_detach(hook, &opts_hpr);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid flags set"))
return -EINVAL;
ret = bpf_tc_detach(hook, &opts_hpf);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid prog_fd set"))
return -EINVAL;
ret = bpf_tc_detach(hook, &opts_hpi);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid prog_id set"))
return -EINVAL;
ret = bpf_tc_detach(hook, &opts_p);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid handle unset"))
return -EINVAL;
ret = bpf_tc_detach(hook, &opts_h);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid priority unset"))
return -EINVAL;
ret = bpf_tc_detach(hook, &opts_prio_max);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_detach invalid priority > UINT16_MAX"))
return -EINVAL;
}
/* query */
{
TEST_DECLARE_OPTS(fd);
ret = bpf_tc_query(NULL, &opts);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid hook = NULL"))
return -EINVAL;
ret = bpf_tc_query(hook, NULL);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid opts = NULL"))
return -EINVAL;
ret = bpf_tc_query(hook, &opts_hpr);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid flags set"))
return -EINVAL;
ret = bpf_tc_query(hook, &opts_hpf);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid prog_fd set"))
return -EINVAL;
ret = bpf_tc_query(hook, &opts_hpi);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid prog_id set"))
return -EINVAL;
ret = bpf_tc_query(hook, &opts_p);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid handle unset"))
return -EINVAL;
ret = bpf_tc_query(hook, &opts_h);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid priority unset"))
return -EINVAL;
ret = bpf_tc_query(hook, &opts_prio_max);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query invalid priority > UINT16_MAX"))
return -EINVAL;
/* when chain is not present, kernel returns -EINVAL */
ret = bpf_tc_query(hook, &opts_hp);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_query valid handle, priority set"))
return -EINVAL;
}
/* attach */
{
TEST_DECLARE_OPTS(fd);
ret = bpf_tc_attach(NULL, &opts_hp);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid hook = NULL"))
return -EINVAL;
ret = bpf_tc_attach(hook, NULL);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid opts = NULL"))
return -EINVAL;
opts_hp.flags = 42;
ret = bpf_tc_attach(hook, &opts_hp);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid flags"))
return -EINVAL;
ret = bpf_tc_attach(hook, NULL);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid prog_fd unset"))
return -EINVAL;
ret = bpf_tc_attach(hook, &opts_hpi);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid prog_id set"))
return -EINVAL;
ret = bpf_tc_attach(hook, &opts_pf);
if (!ASSERT_OK(ret, "bpf_tc_attach valid handle unset"))
return -EINVAL;
opts_pf.prog_fd = opts_pf.prog_id = 0;
ASSERT_OK(bpf_tc_detach(hook, &opts_pf), "bpf_tc_detach");
ret = bpf_tc_attach(hook, &opts_hf);
if (!ASSERT_OK(ret, "bpf_tc_attach valid priority unset"))
return -EINVAL;
opts_hf.prog_fd = opts_hf.prog_id = 0;
ASSERT_OK(bpf_tc_detach(hook, &opts_hf), "bpf_tc_detach");
ret = bpf_tc_attach(hook, &opts_prio_max);
if (!ASSERT_EQ(ret, -EINVAL, "bpf_tc_attach invalid priority > UINT16_MAX"))
return -EINVAL;
ret = bpf_tc_attach(hook, &opts_f);
if (!ASSERT_OK(ret, "bpf_tc_attach valid both handle and priority unset"))
return -EINVAL;
opts_f.prog_fd = opts_f.prog_id = 0;
ASSERT_OK(bpf_tc_detach(hook, &opts_f), "bpf_tc_detach");
}
return 0;
}
void test_tc_bpf(void)
{
DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = LO_IFINDEX,
.attach_point = BPF_TC_INGRESS);
struct test_tc_bpf *skel = NULL;
bool hook_created = false;
int cls_fd, ret;
skel = test_tc_bpf__open_and_load();
if (!ASSERT_OK_PTR(skel, "test_tc_bpf__open_and_load"))
return;
cls_fd = bpf_program__fd(skel->progs.cls);
ret = bpf_tc_hook_create(&hook);
if (ret == 0)
hook_created = true;
ret = ret == -EEXIST ? 0 : ret;
if (!ASSERT_OK(ret, "bpf_tc_hook_create(BPF_TC_INGRESS)"))
goto end;
hook.attach_point = BPF_TC_CUSTOM;
hook.parent = TC_H_MAKE(TC_H_CLSACT, TC_H_MIN_INGRESS);
ret = bpf_tc_hook_create(&hook);
if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_create invalid hook.attach_point"))
goto end;
ret = test_tc_bpf_basic(&hook, cls_fd);
if (!ASSERT_OK(ret, "test_tc_internal ingress"))
goto end;
ret = bpf_tc_hook_destroy(&hook);
if (!ASSERT_EQ(ret, -EOPNOTSUPP, "bpf_tc_hook_destroy invalid hook.attach_point"))
goto end;
hook.attach_point = BPF_TC_INGRESS;
hook.parent = 0;
bpf_tc_hook_destroy(&hook);
ret = test_tc_bpf_basic(&hook, cls_fd);
if (!ASSERT_OK(ret, "test_tc_internal ingress"))
goto end;
bpf_tc_hook_destroy(&hook);
hook.attach_point = BPF_TC_EGRESS;
ret = test_tc_bpf_basic(&hook, cls_fd);
if (!ASSERT_OK(ret, "test_tc_internal egress"))
goto end;
bpf_tc_hook_destroy(&hook);
ret = test_tc_bpf_api(&hook, cls_fd);
if (!ASSERT_OK(ret, "test_tc_bpf_api"))
goto end;
bpf_tc_hook_destroy(&hook);
end:
if (hook_created) {
hook.attach_point = BPF_TC_INGRESS | BPF_TC_EGRESS;
bpf_tc_hook_destroy(&hook);
}
test_tc_bpf__destroy(skel);
}
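The function above exercises the new low-level tc-bpf API end to end. For reference, a minimal usage sketch of that API follows; the helper name and the ifindex, handle and priority values are illustrative placeholders, not taken from the test:

static int tc_attach_detach_example(int ifindex, int prog_fd)
{
	DECLARE_LIBBPF_OPTS(bpf_tc_hook, hook, .ifindex = ifindex,
			    .attach_point = BPF_TC_INGRESS);
	DECLARE_LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1,
			    .prog_fd = prog_fd);
	int ret;

	/* Create the clsact qdisc on the device; an existing one is fine */
	ret = bpf_tc_hook_create(&hook);
	if (ret && ret != -EEXIST)
		return ret;

	/* Attach the classifier; on success opts.prog_id is filled in */
	ret = bpf_tc_attach(&hook, &opts);
	if (ret)
		return ret;

	/* handle/priority identify the filter to remove; prog_fd/prog_id
	 * must be cleared before calling detach or query
	 */
	opts.prog_fd = opts.prog_id = 0;
	return bpf_tc_detach(&hook, &opts);
}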

View file

@@ -3,7 +3,7 @@
#include <test_progs.h>
#include "trace_printk.skel.h"
#include "trace_printk.lskel.h"
#define TRACEBUF "/sys/kernel/debug/tracing/trace_pipe"
#define SEARCHMSG "testing,testing"
@@ -21,6 +21,9 @@ void test_trace_printk(void)
if (CHECK(!skel, "skel_open", "failed to open skeleton\n"))
return;
ASSERT_EQ(skel->rodata->fmt[0], 'T', "invalid printk fmt string");
skel->rodata->fmt[0] = 't';
err = trace_printk__load(skel);
if (CHECK(err, "skel_load", "failed to load skeleton: %d\n", err))
goto cleanup;
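Past this hunk the test continues in the usual skeleton pattern; a rough sketch of that continuation, assuming the light skeleton generates the same __attach() helper and .bss accessors as a regular skeleton (the exact checks below are illustrative):

	err = trace_printk__attach(skel);
	if (CHECK(err, "skel_attach", "failed to attach skeleton: %d\n", err))
		goto cleanup;

	usleep(1);	/* nanosleep() triggers the fentry program */

	if (CHECK(skel->bss->trace_printk_ran == 0, "run", "program did not run\n"))
		goto cleanup;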

View file

@@ -9,8 +9,8 @@ __u32 map1_id = 0, map2_id = 0;
__u32 map1_accessed = 0, map2_accessed = 0;
__u64 map1_seqnum = 0, map2_seqnum1 = 0, map2_seqnum2 = 0;
static volatile const __u32 print_len;
static volatile const __u32 ret1;
volatile const __u32 print_len;
volatile const __u32 ret1;
SEC("iter/bpf_map")
int dump_bpf_map(struct bpf_iter__bpf_map *ctx)
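Dropping the static qualifier is what keeps these variables visible through the generated skeleton now that static variables are no longer emitted into it: userspace initializes the read-only ones before load and reads the plain globals back afterwards. A rough sketch of that userspace side, assuming the skeleton generated from this object is named bpf_iter_test_kern4:

#include <stdio.h>
#include "bpf_iter_test_kern4.skel.h"

static void dump_map_accesses(void)
{
	struct bpf_iter_test_kern4 *skel;

	skel = bpf_iter_test_kern4__open();
	if (!skel)
		return;
	/* .rodata must be written before load; it is frozen afterwards */
	skel->rodata->print_len = 1;
	skel->rodata->ret1 = 1;
	if (bpf_iter_test_kern4__load(skel))
		goto out;
	/* ... attach, create the map iterator and read it here ... */
	/* plain globals land in .bss/.data and can be read back */
	printf("map1 accessed %u times\n", skel->bss->map1_accessed);
out:
	bpf_iter_test_kern4__destroy(skel);
}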

View file

@@ -109,10 +109,10 @@ int BPF_PROG(trace_kfree_skb, struct sk_buff *skb, void *location)
return 0;
}
static volatile struct {
struct {
bool fentry_test_ok;
bool fexit_test_ok;
} result;
} result = {};
SEC("fentry/eth_type_trans")
int BPF_PROG(fentry_eth_type_trans, struct sk_buff *skb, struct net_device *dev,

View file

@@ -75,7 +75,7 @@ int BPF_PROG(handler_exit1)
val = bpf_map_lookup_elem(&map_weak, &key);
if (val)
output_weak1 = *val;
return 0;
}

View file

@@ -0,0 +1,121 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <../../../tools/include/linux/filter.h>
#include <linux/btf.h>
char _license[] SEC("license") = "GPL";
struct args {
__u64 log_buf;
__u32 log_size;
int max_entries;
int map_fd;
int prog_fd;
int btf_fd;
};
#define BTF_INFO_ENC(kind, kind_flag, vlen) \
((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)
#define BTF_INT_ENC(encoding, bits_offset, nr_bits) \
((encoding) << 24 | (bits_offset) << 16 | (nr_bits))
#define BTF_TYPE_INT_ENC(name, encoding, bits_offset, bits, sz) \
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
BTF_INT_ENC(encoding, bits_offset, bits)
static int btf_load(void)
{
struct btf_blob {
struct btf_header btf_hdr;
__u32 types[8];
__u32 str;
} raw_btf = {
.btf_hdr = {
.magic = BTF_MAGIC,
.version = BTF_VERSION,
.hdr_len = sizeof(struct btf_header),
.type_len = sizeof(__u32) * 8,
.str_off = sizeof(__u32) * 8,
.str_len = sizeof(__u32),
},
.types = {
/* long */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [1] */
/* unsigned long */
BTF_TYPE_INT_ENC(0, 0, 0, 64, 8), /* [2] */
},
};
static union bpf_attr btf_load_attr = {
.btf_size = sizeof(raw_btf),
};
btf_load_attr.btf = (long)&raw_btf;
return bpf_sys_bpf(BPF_BTF_LOAD, &btf_load_attr, sizeof(btf_load_attr));
}
SEC("syscall")
int bpf_prog(struct args *ctx)
{
static char license[] = "GPL";
static struct bpf_insn insns[] = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
static union bpf_attr map_create_attr = {
.map_type = BPF_MAP_TYPE_HASH,
.key_size = 8,
.value_size = 8,
.btf_key_type_id = 1,
.btf_value_type_id = 2,
};
static union bpf_attr map_update_attr = { .map_fd = 1, };
static __u64 key = 12;
static __u64 value = 34;
static union bpf_attr prog_load_attr = {
.prog_type = BPF_PROG_TYPE_XDP,
.insn_cnt = sizeof(insns) / sizeof(insns[0]),
};
int ret;
ret = btf_load();
if (ret <= 0)
return ret;
ctx->btf_fd = ret;
map_create_attr.max_entries = ctx->max_entries;
map_create_attr.btf_fd = ret;
prog_load_attr.license = (long) license;
prog_load_attr.insns = (long) insns;
prog_load_attr.log_buf = ctx->log_buf;
prog_load_attr.log_size = ctx->log_size;
prog_load_attr.log_level = 1;
ret = bpf_sys_bpf(BPF_MAP_CREATE, &map_create_attr, sizeof(map_create_attr));
if (ret <= 0)
return ret;
ctx->map_fd = ret;
insns[3].imm = ret;
map_update_attr.map_fd = ret;
map_update_attr.key = (long) &key;
map_update_attr.value = (long) &value;
ret = bpf_sys_bpf(BPF_MAP_UPDATE_ELEM, &map_update_attr, sizeof(map_update_attr));
if (ret < 0)
return ret;
ret = bpf_sys_bpf(BPF_PROG_LOAD, &prog_load_attr, sizeof(prog_load_attr));
if (ret <= 0)
return ret;
ctx->prog_fd = ret;
return 1;
}
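A program in this new "syscall" section is not attached to a kernel hook; userspace runs it via BPF_PROG_TEST_RUN, passing struct args above as the context, and the kernel copies the context back so the created fds are returned the same way. A rough sketch of that userspace side, assuming a regular skeleton generated from this object (syscall.skel.h); the function name is made up:

#include <stdio.h>
#include <bpf/bpf.h>
#include "syscall.skel.h"

/* mirrors struct args in the BPF program above */
struct args {
	__u64 log_buf;
	__u32 log_size;
	int max_entries;
	int map_fd;
	int prog_fd;
	int btf_fd;
};

static void run_syscall_prog(void)
{
	static char verifier_log[8192];
	struct args ctx = {
		.max_entries = 1024,
		.log_buf = (__u64)(unsigned long)verifier_log,
		.log_size = sizeof(verifier_log),
	};
	struct bpf_prog_test_run_attr tattr = {
		.ctx_in = &ctx,
		.ctx_size_in = sizeof(ctx),
	};
	struct syscall *skel;
	int err;

	skel = syscall__open_and_load();
	if (!skel)
		return;
	tattr.prog_fd = bpf_program__fd(skel->progs.bpf_prog);
	/* Executes bpf_prog() above in the kernel: it loads the BTF, creates
	 * and populates the map and loads the XDP program, returning the new
	 * fds through ctx.
	 */
	err = bpf_prog_test_run_xattr(&tattr);
	if (!err && tattr.retval == 1)
		printf("btf_fd=%d map_fd=%d prog_fd=%d\n",
		       ctx.btf_fd, ctx.map_fd, ctx.prog_fd);
	syscall__destroy(skel);
}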

View file

@@ -10,7 +10,7 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
static volatile int count;
int count = 0;
SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb)

View file

@@ -10,7 +10,7 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
static volatile int selector;
int selector = 0;
#define TAIL_FUNC(x) \
SEC("classifier/" #x) \

View file

@@ -10,7 +10,7 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
static volatile int selector;
int selector = 0;
#define TAIL_FUNC(x) \
SEC("classifier/" #x) \

View file

@@ -20,7 +20,7 @@ int subprog_tail(struct __sk_buff *skb)
return 1;
}
static volatile int count;
int count = 0;
SEC("classifier/0")
int bpf_func_0(struct __sk_buff *skb)

View file

@@ -9,7 +9,7 @@ struct {
__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");
static volatile int count;
int count = 0;
__noinline
int subprog_tail_2(struct __sk_buff *skb)

View file

@@ -11,8 +11,8 @@
char _license[] SEC("license") = "GPL";
/* Userspace will update this with the MTU it can see on the device */
static volatile const int GLOBAL_USER_MTU;
static volatile const __u32 GLOBAL_USER_IFINDEX;
volatile const int GLOBAL_USER_MTU;
volatile const __u32 GLOBAL_USER_IFINDEX;
/* BPF-prog will update these with MTU values it can see */
__u32 global_bpf_mtu_xdp = 0;

View file

@@ -39,8 +39,8 @@ char _license[] SEC("license") = "Dual BSD/GPL";
/**
* Destination port and IP used for UDP encapsulation.
*/
static volatile const __be16 ENCAPSULATION_PORT;
static volatile const __be32 ENCAPSULATION_IP;
volatile const __be16 ENCAPSULATION_PORT;
volatile const __be32 ENCAPSULATION_IP;
typedef struct {
uint64_t processed_packets_total;

View file

@@ -8,7 +8,7 @@ struct S {
int v;
};
static volatile struct S global_variable;
struct S global_variable = {};
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);

View file

@@ -5,7 +5,7 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
static volatile const struct {
const struct {
unsigned a[4];
/*
* if the struct's size is multiple of 16, compiler will put it into
@@ -15,11 +15,11 @@ static volatile const struct {
char _y;
} rdonly_values = { .a = {2, 3, 4, 5} };
static volatile struct {
struct {
unsigned did_run;
unsigned iters;
unsigned sum;
} res;
} res = {};
SEC("raw_tracepoint/sys_enter:skip_loop")
int skip_loop(struct pt_regs *ctx)

View file

@@ -35,7 +35,7 @@ long prod_pos = 0;
/* inner state */
long seq = 0;
SEC("tp/syscalls/sys_enter_getpgid")
SEC("fentry/__x64_sys_getpgid")
int test_ringbuf(void *ctx)
{
int cur_pid = bpf_get_current_pid_tgid() >> 32;
@@ -48,7 +48,7 @@ int test_ringbuf(void *ctx)
sample = bpf_ringbuf_reserve(&ringbuf, sizeof(*sample), 0);
if (!sample) {
__sync_fetch_and_add(&dropped, 1);
return 1;
return 0;
}
sample->pid = pid;

View file

@@ -38,11 +38,11 @@ extern int LINUX_KERNEL_VERSION __kconfig;
bool bpf_syscall = 0;
int kern_ver = 0;
struct s out5 = {};
SEC("raw_tp/sys_enter")
int handler(const void *ctx)
{
static volatile struct s out5;
out1 = in1;
out2 = in2;
out3 = in3;

View file

@@ -5,7 +5,7 @@
#include <bpf/bpf_helpers.h>
/* The format string is filled in from userspace so that loading fails */
static const char fmt[10];
const char fmt[10];
SEC("raw_tp/sys_enter")
int handler(const void *ctx)

View file

@@ -28,8 +28,8 @@ struct {
__type(value, unsigned int);
} verdict_map SEC(".maps");
static volatile bool test_sockmap; /* toggled by user-space */
static volatile bool test_ingress; /* toggled by user-space */
bool test_sockmap = false; /* toggled by user-space */
bool test_ingress = false; /* toggled by user-space */
SEC("sk_skb/stream_parser")
int prog_stream_parser(struct __sk_buff *skb)

View file

@@ -4,10 +4,10 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* 8-byte aligned .bss */
static volatile long static_var1;
static volatile int static_var11;
int var1 = 0;
/* 8-byte aligned .data */
static volatile long static_var1 = 2;
static volatile int static_var2 = 3;
int var1 = -1;
/* 4-byte aligned .rodata */
const volatile int rovar1;
@@ -21,7 +21,7 @@ static __noinline int subprog(int x)
SEC("raw_tp/sys_enter")
int handler1(const void *ctx)
{
var1 = subprog(rovar1) + static_var1 + static_var11;
var1 = subprog(rovar1) + static_var1 + static_var2;
return 0;
}

View file

@@ -4,10 +4,10 @@
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* 4-byte aligned .bss */
static volatile int static_var2;
static volatile int static_var22;
int var2 = 0;
/* 4-byte aligned .data */
static volatile int static_var1 = 5;
static volatile int static_var2 = 6;
int var2 = -1;
/* 8-byte aligned .rodata */
const volatile long rovar2;
@@ -21,7 +21,7 @@ static __noinline int subprog(int x)
SEC("raw_tp/sys_enter")
int handler2(const void *ctx)
{
var2 = subprog(rovar2) + static_var2 + static_var22;
var2 = subprog(rovar2) + static_var1 + static_var2;
return 0;
}

View file

@@ -4,8 +4,18 @@
const char LICENSE[] SEC("license") = "GPL";
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, __u32);
__type(value, __u64);
} array SEC(".maps");
__noinline int sub1(int x)
{
int key = 0;
bpf_map_lookup_elem(&array, &key);
return x + 1;
}
@@ -23,6 +33,9 @@ static __noinline int sub3(int z)
static __noinline int sub4(int w)
{
int key = 0;
bpf_map_lookup_elem(&array, &key);
return w + sub3(5) + sub1(6);
}

View file

@@ -0,0 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
/* Dummy prog to test TC-BPF API */
SEC("classifier")
int cls(struct __sk_buff *skb)
{
return 0;
}

View file

@@ -10,11 +10,11 @@ char _license[] SEC("license") = "GPL";
int trace_printk_ret = 0;
int trace_printk_ran = 0;
SEC("tp/raw_syscalls/sys_enter")
const char fmt[] = "Testing,testing %d\n";
SEC("fentry/__x64_sys_nanosleep")
int sys_enter(void *ctx)
{
static const char fmt[] = "testing,testing %d\n";
trace_printk_ret = bpf_trace_printk(fmt, sizeof(fmt),
++trace_printk_ran);
return 0;