Merge branch 'bpf: Follow up to RCU enforcement in the verifier.'

Alexei Starovoitov says:

====================

From: Alexei Starovoitov <ast@kernel.org>

The patch set addresses the fallout from
commit 6fcd486b3a ("bpf: Refactor RCU enforcement in the verifier."),
which was too aggressive with PTR_UNTRUSTED marks.
Patches 1-6 are cleanups and add verifier smartness to address real
use cases in bpf programs that broke with the overly aggressive PTR_UNTRUSTED.
A partial revert is done in patch 7 as well.
====================

Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Andrii Nakryiko 2023-04-04 16:53:31 -07:00
commit e8f59d84f4
13 changed files with 131 additions and 97 deletions
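The use case this series re-enables is demonstrated by the new selftest at the bottom of the diff: in an RCU-protected tracing program, walking skb->sk or req->sk now yields a MEM_RCU | PTR_MAYBE_NULL pointer, and helpers such as bpf_sk_storage_get() accept it because their socket argument is now ARG_PTR_TO_BTF_ID_OR_NULL. A minimal sketch of such a program, mirroring the selftest (the map definition below is an assumed minimal sk-storage map, not part of this diff):

#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, int);
} sk_stg_map SEC(".maps");

SEC("tp_btf/tcp_bad_csum")
int BPF_PROG(tcp_bad_csum, struct sk_buff *skb)
{
	/* skb->sk is walked as MEM_RCU | PTR_MAYBE_NULL; passing it to the
	 * helper is allowed since the argument is ARG_PTR_TO_BTF_ID_OR_NULL.
	 */
	bpf_sk_storage_get(&sk_stg_map, skb->sk, 0, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";

Dereferencing skb->sk directly in the program would still require a NULL check first, since the verifier marks the pointer PTR_MAYBE_NULL.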


@@ -893,8 +893,7 @@ struct bpf_verifier_ops {
struct bpf_prog *prog, u32 *target_size);
int (*btf_struct_access)(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype,
u32 *next_btf_id, enum bpf_type_flag *flag);
int off, int size);
};
struct bpf_prog_offload_ops {
@@ -2264,7 +2263,7 @@ static inline bool bpf_tracing_btf_ctx_access(int off, int size,
int btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype,
u32 *next_btf_id, enum bpf_type_flag *flag);
u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name);
bool btf_struct_ids_match(struct bpf_verifier_log *log,
const struct btf *btf, u32 id, int off,
const struct btf *need_btf, u32 need_type_id,
@@ -2303,7 +2302,7 @@ struct bpf_core_ctx {
bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, const char *suffix);
const char *field_name, u32 btf_id, const char *suffix);
bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log,
const struct btf *reg_btf, u32 reg_id,
@@ -2518,7 +2517,8 @@ static inline struct bpf_prog *bpf_prog_by_id(u32 id)
static inline int btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype,
u32 *next_btf_id, enum bpf_type_flag *flag)
u32 *next_btf_id, enum bpf_type_flag *flag,
const char **field_name)
{
return -EACCES;
}


@@ -571,8 +571,7 @@ DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
extern struct mutex nf_conn_btf_access_lock;
extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype,
u32 *next_btf_id, enum bpf_type_flag *flag);
int off, int size);
typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
const struct bpf_insn *insnsi,


@@ -224,7 +224,7 @@ const struct bpf_func_proto bpf_cgrp_storage_get_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
.arg2_btf_id = &bpf_cgroup_btf_id[0],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
@@ -235,6 +235,6 @@ const struct bpf_func_proto bpf_cgrp_storage_delete_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
.arg2_btf_id = &bpf_cgroup_btf_id[0],
};


@@ -229,7 +229,7 @@ const struct bpf_func_proto bpf_inode_storage_get_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
.arg2_btf_id = &bpf_inode_storage_btf_ids[0],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
@@ -240,6 +240,6 @@ const struct bpf_func_proto bpf_inode_storage_delete_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
.arg2_btf_id = &bpf_inode_storage_btf_ids[0],
};


@@ -338,7 +338,7 @@ const struct bpf_func_proto bpf_task_storage_get_recur_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
@@ -349,7 +349,7 @@ const struct bpf_func_proto bpf_task_storage_get_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
@@ -360,7 +360,7 @@ const struct bpf_func_proto bpf_task_storage_delete_recur_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};
@@ -369,6 +369,6 @@ const struct bpf_func_proto bpf_task_storage_delete_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};


@@ -6166,7 +6166,8 @@ enum bpf_struct_walk_result {
static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
const struct btf_type *t, int off, int size,
u32 *next_btf_id, enum bpf_type_flag *flag)
u32 *next_btf_id, enum bpf_type_flag *flag,
const char **field_name)
{
u32 i, moff, mtrue_end, msize = 0, total_nelems = 0;
const struct btf_type *mtype, *elem_type = NULL;
@@ -6395,6 +6396,8 @@ static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
if (btf_type_is_struct(stype)) {
*next_btf_id = id;
*flag |= tmp_flag;
if (field_name)
*field_name = mname;
return WALK_PTR;
}
}
@@ -6421,7 +6424,8 @@ static int btf_struct_walk(struct bpf_verifier_log *log, const struct btf *btf,
int btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype __maybe_unused,
u32 *next_btf_id, enum bpf_type_flag *flag)
u32 *next_btf_id, enum bpf_type_flag *flag,
const char **field_name)
{
const struct btf *btf = reg->btf;
enum bpf_type_flag tmp_flag = 0;
@@ -6453,7 +6457,7 @@ int btf_struct_access(struct bpf_verifier_log *log,
t = btf_type_by_id(btf, id);
do {
err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag);
err = btf_struct_walk(log, btf, t, off, size, &id, &tmp_flag, field_name);
switch (err) {
case WALK_PTR:
@@ -6528,7 +6532,7 @@ bool btf_struct_ids_match(struct bpf_verifier_log *log,
type = btf_type_by_id(btf, id);
if (!type)
return false;
err = btf_struct_walk(log, btf, type, off, 1, &id, &flag);
err = btf_struct_walk(log, btf, type, off, 1, &id, &flag, NULL);
if (err != WALK_STRUCT)
return false;
@@ -8488,16 +8492,15 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, const char *suffix)
const char *field_name, u32 btf_id, const char *suffix)
{
struct btf *btf = reg->btf;
const struct btf_type *walk_type, *safe_type;
const char *tname;
char safe_tname[64];
long ret, safe_id;
const struct btf_member *member, *m_walk = NULL;
const struct btf_member *member;
u32 i;
const char *walk_name;
walk_type = btf_type_by_id(btf, reg->btf_id);
if (!walk_type)
@@ -8517,30 +8520,17 @@ bool btf_nested_type_is_trusted(struct bpf_verifier_log *log,
if (!safe_type)
return false;
for_each_member(i, walk_type, member) {
u32 moff;
/* We're looking for the PTR_TO_BTF_ID member in the struct
* type we're walking which matches the specified offset.
* Below, we'll iterate over the fields in the safe variant of
* the struct and see if any of them has a matching type /
* name.
*/
moff = __btf_member_bit_offset(walk_type, member) / 8;
if (off == moff) {
m_walk = member;
break;
}
}
if (m_walk == NULL)
return false;
walk_name = __btf_name_by_offset(btf, m_walk->name_off);
for_each_member(i, safe_type, member) {
const char *m_name = __btf_name_by_offset(btf, member->name_off);
const struct btf_type *mtype = btf_type_by_id(btf, member->type);
u32 id;
if (!btf_type_is_ptr(mtype))
continue;
btf_type_skip_modifiers(btf, mtype->type, &id);
/* If we match on both type and name, the field is considered trusted. */
if (m_walk->type == member->type && !strcmp(walk_name, m_name))
if (btf_id == id && !strcmp(field_name, m_name))
return true;
}


@@ -4974,6 +4974,11 @@ static bool is_rcu_reg(const struct bpf_reg_state *reg)
return reg->type & MEM_RCU;
}
static void clear_trusted_flags(enum bpf_type_flag *flag)
{
*flag &= ~(BPF_REG_TRUSTED_MODIFIERS | MEM_RCU);
}
static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
const struct bpf_reg_state *reg,
int off, int size, bool strict)
@@ -5378,6 +5383,7 @@ static int bpf_map_direct_read(struct bpf_map *map, int off, int size, u64 *val)
}
#define BTF_TYPE_SAFE_RCU(__type) __PASTE(__type, __safe_rcu)
#define BTF_TYPE_SAFE_RCU_OR_NULL(__type) __PASTE(__type, __safe_rcu_or_null)
#define BTF_TYPE_SAFE_TRUSTED(__type) __PASTE(__type, __safe_trusted)
/*
@@ -5394,18 +5400,39 @@ BTF_TYPE_SAFE_RCU(struct task_struct) {
struct task_struct *group_leader;
};
BTF_TYPE_SAFE_RCU(struct cgroup) {
/* cgrp->kn is always accessible as documented in kernel/cgroup/cgroup.c */
struct kernfs_node *kn;
};
BTF_TYPE_SAFE_RCU(struct css_set) {
struct cgroup *dfl_cgrp;
};
/* RCU trusted: these fields are trusted in RCU CS and can be NULL */
BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct) {
struct file __rcu *exe_file;
};
/* skb->sk, req->sk are not RCU protected, but we mark them as such
* because bpf prog accessible sockets are SOCK_RCU_FREE.
*/
BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff) {
struct sock *sk;
};
BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock) {
struct sock *sk;
};
/* full trusted: these fields are trusted even outside of RCU CS and never NULL */
BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta) {
__bpf_md_ptr(struct seq_file *, seq);
struct seq_file *seq;
};
BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task) {
__bpf_md_ptr(struct bpf_iter_meta *, meta);
__bpf_md_ptr(struct task_struct *, task);
struct bpf_iter_meta *meta;
struct task_struct *task;
};
BTF_TYPE_SAFE_TRUSTED(struct linux_binprm) {
@@ -5427,17 +5454,29 @@ BTF_TYPE_SAFE_TRUSTED(struct socket) {
static bool type_is_rcu(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
int off)
const char *field_name, u32 btf_id)
{
BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct task_struct));
BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct cgroup));
BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU(struct css_set));
return btf_nested_type_is_trusted(&env->log, reg, off, "__safe_rcu");
return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu");
}
static bool type_is_rcu_or_null(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
const char *field_name, u32 btf_id)
{
BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct mm_struct));
BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct sk_buff));
BTF_TYPE_EMIT(BTF_TYPE_SAFE_RCU_OR_NULL(struct request_sock));
return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_rcu_or_null");
}
static bool type_is_trusted(struct bpf_verifier_env *env,
struct bpf_reg_state *reg,
int off)
const char *field_name, u32 btf_id)
{
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter_meta));
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct bpf_iter__task));
@@ -5446,7 +5485,7 @@ static bool type_is_trusted(struct bpf_verifier_env *env,
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct dentry));
BTF_TYPE_EMIT(BTF_TYPE_SAFE_TRUSTED(struct socket));
return btf_nested_type_is_trusted(&env->log, reg, off, "__safe_trusted");
return btf_nested_type_is_trusted(&env->log, reg, field_name, btf_id, "__safe_trusted");
}
static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
@@ -5458,8 +5497,9 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
struct bpf_reg_state *reg = regs + regno;
const struct btf_type *t = btf_type_by_id(reg->btf, reg->btf_id);
const char *tname = btf_name_by_offset(reg->btf, t->name_off);
const char *field_name = NULL;
enum bpf_type_flag flag = 0;
u32 btf_id;
u32 btf_id = 0;
int ret;
if (!env->allow_ptr_leaks) {
@@ -5504,12 +5544,12 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
return -EACCES;
}
if (env->ops->btf_struct_access && !type_is_alloc(reg->type)) {
if (env->ops->btf_struct_access && !type_is_alloc(reg->type) && atype == BPF_WRITE) {
if (!btf_is_kernel(reg->btf)) {
verbose(env, "verifier internal error: reg->btf must be kernel btf\n");
return -EFAULT;
}
ret = env->ops->btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag);
ret = env->ops->btf_struct_access(&env->log, reg, off, size);
} else {
/* Writes are permitted with default btf_struct_access for
* program allocated objects (which always have ref_obj_id > 0),
@@ -5526,7 +5566,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
return -EFAULT;
}
ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag);
ret = btf_struct_access(&env->log, reg, off, size, atype, &btf_id, &flag, &field_name);
}
if (ret < 0)
@@ -5554,20 +5594,21 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
* A regular RCU-protected pointer with __rcu tag can also be deemed
* trusted if we are in an RCU CS. Such pointer can be NULL.
*/
if (type_is_trusted(env, reg, off)) {
if (type_is_trusted(env, reg, field_name, btf_id)) {
flag |= PTR_TRUSTED;
} else if (in_rcu_cs(env) && !type_may_be_null(reg->type)) {
if (type_is_rcu(env, reg, off)) {
if (type_is_rcu(env, reg, field_name, btf_id)) {
/* ignore __rcu tag and mark it MEM_RCU */
flag |= MEM_RCU;
} else if (flag & MEM_RCU) {
} else if (flag & MEM_RCU ||
type_is_rcu_or_null(env, reg, field_name, btf_id)) {
/* __rcu tagged pointers can be NULL */
flag |= PTR_MAYBE_NULL;
flag |= MEM_RCU | PTR_MAYBE_NULL;
} else if (flag & (MEM_PERCPU | MEM_USER)) {
/* keep as-is */
} else {
/* walking unknown pointers yields untrusted pointer */
flag = PTR_UNTRUSTED;
/* walking unknown pointers yields old deprecated PTR_TO_BTF_ID */
clear_trusted_flags(&flag);
}
} else {
/*
@@ -5581,7 +5622,7 @@ static int check_ptr_to_btf_access(struct bpf_verifier_env *env,
}
} else {
/* Old compat. Deprecated */
flag &= ~PTR_TRUSTED;
clear_trusted_flags(&flag);
}
if (atype == BPF_READ && value_regno >= 0)
@@ -5640,7 +5681,7 @@ static int check_ptr_to_map_access(struct bpf_verifier_env *env,
/* Simulate access to a PTR_TO_BTF_ID */
memset(&map_reg, 0, sizeof(map_reg));
mark_btf_ld_reg(env, &map_reg, 0, PTR_TO_BTF_ID, btf_vmlinux, *map->ops->map_btf_id, 0);
ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag);
ret = btf_struct_access(&env->log, &map_reg, off, size, atype, &btf_id, &flag, NULL);
if (ret < 0)
return ret;
@@ -7167,6 +7208,8 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
case PTR_TO_BTF_ID:
case PTR_TO_BTF_ID | PTR_TRUSTED:
case PTR_TO_BTF_ID | MEM_RCU:
case PTR_TO_BTF_ID | PTR_MAYBE_NULL:
case PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU:
{
/* For bpf_sk_release, it needs to match against first member
* 'struct sock_common', hence make an exception for it. This
@@ -7175,6 +7218,12 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
bool strict_type_match = arg_type_is_release(arg_type) &&
meta->func_id != BPF_FUNC_sk_release;
if (type_may_be_null(reg->type) &&
(!type_may_be_null(arg_type) || arg_type_is_release(arg_type))) {
verbose(env, "Possibly NULL pointer passed to helper arg%d\n", regno);
return -EACCES;
}
if (!arg_btf_id) {
if (!compatible->btf_id) {
verbose(env, "verifier internal error: missing arg compatible BTF ID\n");
@@ -7205,10 +7254,6 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
}
break;
}
case PTR_TO_BTF_ID | PTR_MAYBE_NULL:
case PTR_TO_BTF_ID | PTR_MAYBE_NULL | MEM_RCU:
verbose(env, "Possibly NULL pointer passed to helper arg%d\n", regno);
return -EACCES;
case PTR_TO_BTF_ID | MEM_ALLOC:
if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock &&
meta->func_id != BPF_FUNC_kptr_xchg) {


@@ -173,14 +173,11 @@ static int bpf_dummy_ops_check_member(const struct btf_type *t,
static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype,
u32 *next_btf_id,
enum bpf_type_flag *flag)
int off, int size)
{
const struct btf_type *state;
const struct btf_type *t;
s32 type_id;
int err;
type_id = btf_find_by_name_kind(reg->btf, "bpf_dummy_ops_state",
BTF_KIND_STRUCT);
@@ -194,11 +191,12 @@ static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
return -EACCES;
}
err = btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
if (err < 0)
return err;
if (off + size > sizeof(struct bpf_dummy_ops_state)) {
bpf_log(log, "write access at off %d with size %d\n", off, size);
return -EACCES;
}
return atype == BPF_READ ? err : NOT_INIT;
return NOT_INIT;
}
static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {


@@ -412,7 +412,7 @@ const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
.gpl_only = false,
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
@@ -424,7 +424,7 @@ const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_type = ARG_PTR_TO_BTF_ID_OR_NULL,
.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
.allowed = bpf_sk_storage_tracing_allowed,
};


@@ -4998,7 +4998,7 @@ const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto = {
.func = bpf_get_socket_ptr_cookie,
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
.arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON | PTR_MAYBE_NULL,
};
BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
@@ -8742,23 +8742,18 @@ EXPORT_SYMBOL_GPL(nf_conn_btf_access_lock);
int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype,
u32 *next_btf_id, enum bpf_type_flag *flag);
int off, int size);
EXPORT_SYMBOL_GPL(nfct_btf_struct_access);
static int tc_cls_act_btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype,
u32 *next_btf_id, enum bpf_type_flag *flag)
int off, int size)
{
int ret = -EACCES;
if (atype == BPF_READ)
return btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
mutex_lock(&nf_conn_btf_access_lock);
if (nfct_btf_struct_access)
ret = nfct_btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
ret = nfct_btf_struct_access(log, reg, off, size);
mutex_unlock(&nf_conn_btf_access_lock);
return ret;
@@ -8825,17 +8820,13 @@ EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
static int xdp_btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype,
u32 *next_btf_id, enum bpf_type_flag *flag)
int off, int size)
{
int ret = -EACCES;
if (atype == BPF_READ)
return btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
mutex_lock(&nf_conn_btf_access_lock);
if (nfct_btf_struct_access)
ret = nfct_btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
ret = nfct_btf_struct_access(log, reg, off, size);
mutex_unlock(&nf_conn_btf_access_lock);
return ret;


@@ -72,15 +72,11 @@ static bool bpf_tcp_ca_is_valid_access(int off, int size,
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype,
u32 *next_btf_id, enum bpf_type_flag *flag)
int off, int size)
{
const struct btf_type *t;
size_t end;
if (atype == BPF_READ)
return btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
t = btf_type_by_id(reg->btf, reg->btf_id);
if (t != tcp_sock_type) {
bpf_log(log, "only read is supported\n");


@@ -192,8 +192,7 @@ BTF_ID(struct, nf_conn___init)
/* Check writes into `struct nf_conn` */
static int _nf_conntrack_btf_struct_access(struct bpf_verifier_log *log,
const struct bpf_reg_state *reg,
int off, int size, enum bpf_access_type atype,
u32 *next_btf_id, enum bpf_type_flag *flag)
int off, int size)
{
const struct btf_type *ncit, *nct, *t;
size_t end;


@@ -92,4 +92,20 @@ int BPF_PROG(inet_csk_accept, struct sock *sk, int flags, int *err, bool kern,
return 0;
}
SEC("tp_btf/tcp_retransmit_synack")
int BPF_PROG(tcp_retransmit_synack, struct sock* sk, struct request_sock* req)
{
/* load only test */
bpf_sk_storage_get(&sk_stg_map, sk, 0, 0);
bpf_sk_storage_get(&sk_stg_map, req->sk, 0, 0);
return 0;
}
SEC("tp_btf/tcp_bad_csum")
int BPF_PROG(tcp_bad_csum, struct sk_buff* skb)
{
bpf_sk_storage_get(&sk_stg_map, skb->sk, 0, 0);
return 0;
}
char _license[] SEC("license") = "GPL";