Daniel Borkmann says:

====================
pull-request: bpf-next 2021-11-15

We've added 72 non-merge commits during the last 13 day(s) which contain
a total of 171 files changed, 2728 insertions(+), 1143 deletions(-).

The main changes are:

1) Add btf_type_tag attributes to bring kernel annotations like __user/__rcu to
   BTF so that the BPF verifier can detect misuse, from Yonghong Song.

2) Big batch of libbpf improvements including various fixes, future-proofing APIs,
   and adding a unified, OPTS-based bpf_prog_load() low-level API (see the sketch
   after this list), from Andrii Nakryiko.

3) Add ingress_ifindex to BPF_SK_LOOKUP program type for selectively applying the
   programmable socket lookup logic to packets from a given netdev, from Mark Pashmfouroush.

4) Remove the 128M upper JIT limit for BPF programs on arm64 and add a selftest to
   ensure exception handling still works, from Russell King and Alan Maguire.

5) Add a new bpf_find_vma() helper for tracing to map an address to the backing
   file such as a shared library, from Song Liu.

6) Batch of various misc fixes to bpftool, fixing a memory leak in the BPF program
   dump and updating documentation and bash completion, among others, from Quentin Monnet.

7) Deprecate the libbpf bpf_program__get_prog_info_linear() API and migrate its users,
   as the API is heavily tailored around perf and is non-generic, from Dave Marchevsky.

8) Enable libbpf's strict mode by default in bpftool and add a --legacy option as an
   opt-out for more relaxed BPF program requirements, from Stanislav Fomichev.

9) Fix bpftool to use libbpf_get_error() to check for errors, from Hengqi Chen.
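
As a rough sketch of the new low-level API from 2) — a minimal, hedged example
assuming libbpf's bpf.h from this series (LIBBPF_OPTS() and struct
bpf_prog_load_opts) — loading a trivial XDP program looks like:

#include <bpf/bpf.h>      /* bpf_prog_load(), LIBBPF_OPTS() */
#include <linux/bpf.h>    /* struct bpf_insn, BPF_* opcodes, XDP_PASS */

/* two-instruction "return XDP_PASS" program */
const struct bpf_insn insns[] = {
	{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = XDP_PASS },
	{ .code = BPF_JMP | BPF_EXIT },
};
LIBBPF_OPTS(bpf_prog_load_opts, opts);	/* set log_buf/log_level etc. here */
int prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, "xdp_pass", "GPL",
			    insns, sizeof(insns) / sizeof(insns[0]), &opts);

The OPTS struct carries its own size field, so new fields can be appended later
without breaking callers — the main motivation for unifying the older
bpf_load_program() variants behind one call.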

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (72 commits)
  bpftool: Use libbpf_get_error() to check error
  bpftool: Fix mixed indentation in documentation
  bpftool: Update the lists of names for maps and prog-attach types
  bpftool: Fix indent in option lists in the documentation
  bpftool: Remove inclusion of utilities.mak from Makefiles
  bpftool: Fix memory leak in prog_dump()
  selftests/bpf: Fix a tautological-constant-out-of-range-compare compiler warning
  selftests/bpf: Fix an unused-but-set-variable compiler warning
  bpf: Introduce btf_tracing_ids
  bpf: Extend BTF_ID_LIST_GLOBAL with parameter for number of IDs
  bpftool: Enable libbpf's strict mode by default
  docs/bpf: Update documentation for BTF_KIND_TYPE_TAG support
  selftests/bpf: Clarify llvm dependency with btf_tag selftest
  selftests/bpf: Add a C test for btf_type_tag
  selftests/bpf: Rename progs/tag.c to progs/btf_decl_tag.c
  selftests/bpf: Test BTF_KIND_DECL_TAG for deduplication
  selftests/bpf: Add BTF_KIND_TYPE_TAG unit tests
  selftests/bpf: Test libbpf API function btf__add_type_tag()
  bpftool: Support BTF_KIND_TYPE_TAG
  libbpf: Support BTF_KIND_TYPE_TAG
  ...
====================

Link: https://lore.kernel.org/r/20211115162008.25916-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>


@ -86,6 +86,7 @@ sequentially and type id is assigned to each recognized type starting from id
#define BTF_KIND_DATASEC 15 /* Section */
#define BTF_KIND_FLOAT 16 /* Floating point */
#define BTF_KIND_DECL_TAG 17 /* Decl Tag */
#define BTF_KIND_TYPE_TAG 18 /* Type Tag */
Note that the type section encodes debug info, not just pure types.
``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram.
@ -107,7 +108,7 @@ Each type contains the following common data::
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
* FUNC, FUNC_PROTO and DECL_TAG.
* FUNC, FUNC_PROTO, DECL_TAG and TYPE_TAG.
* "type" is a type_id referring to another type.
*/
union {
@ -492,6 +493,16 @@ the attribute is applied to a ``struct``/``union`` member or
a ``func`` argument, and ``btf_decl_tag.component_idx`` should be a
valid index (starting from 0) pointing to a member or an argument.
2.2.17 BTF_KIND_TYPE_TAG
~~~~~~~~~~~~~~~~~~~~~~~~
``struct btf_type`` encoding requirement:
* ``name_off``: offset to a non-empty string
* ``info.kind_flag``: 0
* ``info.kind``: BTF_KIND_TYPE_TAG
* ``info.vlen``: 0
* ``type``: the type with ``btf_type_tag`` attribute
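For example, with a compiler that supports the attribute (assumed here: clang 14
or newer), the kernel's ``__user`` annotation can be encoded as::

#define __user __attribute__((btf_type_tag("user")))

int process(int __user *arg);

The pointee of ``arg`` is then encoded as a ``BTF_KIND_TYPE_TAG`` named ``user``
whose ``type`` points to ``int``, which the verifier can use to flag direct
dereferences of user pointers.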
3. BTF Kernel API
*****************


@ -33,15 +33,6 @@ do { \
(b)->data = (tmp).data; \
} while (0)
static inline bool in_bpf_jit(struct pt_regs *regs)
{
if (!IS_ENABLED(CONFIG_BPF_JIT))
return false;
return regs->pc >= BPF_JIT_REGION_START &&
regs->pc < BPF_JIT_REGION_END;
}
#ifdef CONFIG_BPF_JIT
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs);


@ -44,11 +44,8 @@
#define _PAGE_OFFSET(va) (-(UL(1) << (va)))
#define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS))
#define KIMAGE_VADDR (MODULES_END)
#define BPF_JIT_REGION_START (_PAGE_END(VA_BITS_MIN))
#define BPF_JIT_REGION_SIZE (SZ_128M)
#define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE)
#define MODULES_END (MODULES_VADDR + MODULES_VSIZE)
#define MODULES_VADDR (BPF_JIT_REGION_END)
#define MODULES_VADDR (_PAGE_END(VA_BITS_MIN))
#define MODULES_VSIZE (SZ_128M)
#define VMEMMAP_START (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
#define VMEMMAP_END (VMEMMAP_START + VMEMMAP_SIZE)


@ -994,7 +994,7 @@ static struct break_hook bug_break_hook = {
static int reserved_fault_handler(struct pt_regs *regs, unsigned int esr)
{
pr_err("%s generated an invalid instruction at %pS!\n",
in_bpf_jit(regs) ? "BPF JIT" : "Kernel text patching",
"Kernel text patching",
(void *)instruction_pointer(regs));
/* We cannot handle this */


@ -41,8 +41,6 @@ static struct addr_marker address_markers[] = {
{ 0 /* KASAN_SHADOW_START */, "Kasan shadow start" },
{ KASAN_SHADOW_END, "Kasan shadow end" },
#endif
{ BPF_JIT_REGION_START, "BPF start" },
{ BPF_JIT_REGION_END, "BPF end" },
{ MODULES_VADDR, "Modules start" },
{ MODULES_END, "Modules end" },
{ VMALLOC_START, "vmalloc() area" },


@ -1141,15 +1141,12 @@ out:
u64 bpf_jit_alloc_exec_limit(void)
{
return BPF_JIT_REGION_SIZE;
return VMALLOC_END - VMALLOC_START;
}
void *bpf_jit_alloc_exec(unsigned long size)
{
return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
BPF_JIT_REGION_END, GFP_KERNEL,
PAGE_KERNEL, 0, NUMA_NO_NODE,
__builtin_return_address(0));
return vmalloc(size);
}
void bpf_jit_free_exec(void *addr)


@ -2163,6 +2163,7 @@ extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
extern const struct bpf_func_proto bpf_sk_setsockopt_proto;
extern const struct bpf_func_proto bpf_sk_getsockopt_proto;
extern const struct bpf_func_proto bpf_kallsyms_lookup_name_proto;
extern const struct bpf_func_proto bpf_find_vma_proto;
const struct bpf_func_proto *tracing_prog_func_proto(
enum bpf_func_id func_id, const struct bpf_prog *prog);


@ -73,7 +73,7 @@ asm( \
__BTF_ID_LIST(name, local) \
extern u32 name[];
#define BTF_ID_LIST_GLOBAL(name) \
#define BTF_ID_LIST_GLOBAL(name, n) \
__BTF_ID_LIST(name, globl)
/* The BTF_ID_LIST_SINGLE macro defines a BTF_ID_LIST with
@ -83,7 +83,7 @@ __BTF_ID_LIST(name, globl)
BTF_ID_LIST(name) \
BTF_ID(prefix, typename)
#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) \
BTF_ID_LIST_GLOBAL(name) \
BTF_ID_LIST_GLOBAL(name, 1) \
BTF_ID(prefix, typename)
/*
@ -149,7 +149,7 @@ extern struct btf_id_set name;
#define BTF_ID_LIST(name) static u32 name[5];
#define BTF_ID(prefix, name)
#define BTF_ID_UNUSED
#define BTF_ID_LIST_GLOBAL(name) u32 name[1];
#define BTF_ID_LIST_GLOBAL(name, n) u32 name[n];
#define BTF_ID_LIST_SINGLE(name, prefix, typename) static u32 name[1];
#define BTF_ID_LIST_GLOBAL_SINGLE(name, prefix, typename) u32 name[1];
#define BTF_SET_START(name) static struct btf_id_set name = { 0 };
@ -189,6 +189,18 @@ MAX_BTF_SOCK_TYPE,
extern u32 btf_sock_ids[];
#endif
extern u32 btf_task_struct_ids[];
#define BTF_TRACING_TYPE_xxx \
BTF_TRACING_TYPE(BTF_TRACING_TYPE_TASK, task_struct) \
BTF_TRACING_TYPE(BTF_TRACING_TYPE_FILE, file) \
BTF_TRACING_TYPE(BTF_TRACING_TYPE_VMA, vm_area_struct)
enum {
#define BTF_TRACING_TYPE(name, type) name,
BTF_TRACING_TYPE_xxx
#undef BTF_TRACING_TYPE
MAX_BTF_TRACING_TYPE,
};
extern u32 btf_tracing_ids[];
#endif
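
For reference, the X-macro above expands mechanically to:

enum {
	BTF_TRACING_TYPE_TASK,	/* task_struct */
	BTF_TRACING_TYPE_FILE,	/* file */
	BTF_TRACING_TYPE_VMA,	/* vm_area_struct */
	MAX_BTF_TRACING_TYPE,
};

btf_tracing_ids[] itself is resolved in kernel/bpf/btf.c further down,
replacing the old single-entry btf_task_struct_ids[].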


@ -1374,6 +1374,7 @@ struct bpf_sk_lookup_kern {
const struct in6_addr *daddr;
} v6;
struct sock *selected_sk;
u32 ingress_ifindex;
bool no_reuseport;
};
@ -1436,7 +1437,7 @@ extern struct static_key_false bpf_sk_lookup_enabled;
static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
const __be32 saddr, const __be16 sport,
const __be32 daddr, const u16 dport,
struct sock **psk)
const int ifindex, struct sock **psk)
{
struct bpf_prog_array *run_array;
struct sock *selected_sk = NULL;
@ -1452,6 +1453,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
.v4.daddr = daddr,
.sport = sport,
.dport = dport,
.ingress_ifindex = ifindex,
};
u32 act;
@ -1474,7 +1476,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
const __be16 sport,
const struct in6_addr *daddr,
const u16 dport,
struct sock **psk)
const int ifindex, struct sock **psk)
{
struct bpf_prog_array *run_array;
struct sock *selected_sk = NULL;
@ -1490,6 +1492,7 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
.v6.daddr = daddr,
.sport = sport,
.dport = dport,
.ingress_ifindex = ifindex,
};
u32 act;


@ -4938,6 +4938,25 @@ union bpf_attr {
* **-ENOENT** if symbol is not found.
*
* **-EPERM** if caller does not have permission to obtain kernel address.
*
* long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
* Description
* Find vma of *task* that contains *addr*, call *callback_fn*
* function with *task*, *vma*, and *callback_ctx*.
* The *callback_fn* should be a static function and
* the *callback_ctx* should be a pointer to the stack.
* *flags* is used to control certain aspects of the helper.
* Currently, the *flags* must be 0.
*
* The expected callback signature is
*
* long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
*
* Return
* 0 on success.
* **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
* **-EBUSY** if the helper fails to trylock *mmap_lock*.
* **-EINVAL** for invalid **flags**.
*/
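
A minimal BPF-side sketch of the helper (hedged: the attach point and address
are illustrative only; vmlinux.h, bpf_helpers.h and bpf_tracing.h are assumed):

static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
		      void *ctx)
{
	__u64 *vm_start = ctx;

	*vm_start = vma->vm_start;	/* report back through the stack ctx */
	return 0;
}

SEC("fentry/do_nanosleep")		/* illustrative attach point */
int BPF_PROG(find_addr)
{
	struct task_struct *task = bpf_get_current_task_btf();
	__u64 vm_start = 0;

	bpf_find_vma(task, 0x400000 /* arbitrary example address */,
		     check_vma, &vm_start, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";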
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@ -5120,6 +5139,7 @@ union bpf_attr {
FN(trace_vprintk), \
FN(skc_to_unix_sock), \
FN(kallsyms_lookup_name), \
FN(find_vma), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@ -6296,6 +6316,7 @@ struct bpf_sk_lookup {
__u32 local_ip4; /* Network byte order */
__u32 local_ip6[4]; /* Network byte order */
__u32 local_port; /* Host byte order */
__u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */
};
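
A minimal sketch of a BPF_PROG_TYPE_SK_LOOKUP program consuming the new field
(hedged: map name and ifindex value are illustrative; vmlinux.h and
bpf_helpers.h assumed):

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64);
} redir_map SEC(".maps");		/* assumed to hold the target socket */

SEC("sk_lookup")
int steer_by_ifindex(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	__u32 key = 0;

	if (ctx->ingress_ifindex != 2)	/* 2 = illustrative ifindex */
		return SK_PASS;		/* fall through to regular lookup */

	sk = bpf_map_lookup_elem(&redir_map, &key);
	if (!sk)
		return SK_DROP;
	bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return SK_PASS;
}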
/*


@ -43,7 +43,7 @@ struct btf_type {
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
* FUNC, FUNC_PROTO, VAR and DECL_TAG.
* FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG.
* "type" is a type_id referring to another type.
*/
union {
@ -75,6 +75,7 @@ enum {
BTF_KIND_DATASEC = 15, /* Section */
BTF_KIND_FLOAT = 16, /* Floating point */
BTF_KIND_DECL_TAG = 17, /* Decl Tag */
BTF_KIND_TYPE_TAG = 18, /* Type Tag */
NR_BTF_KINDS,
BTF_KIND_MAX = NR_BTF_KINDS - 1,


@ -323,7 +323,7 @@ const struct bpf_func_proto bpf_task_storage_get_proto = {
.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_btf_id = &btf_task_struct_ids[0],
.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
.arg4_type = ARG_ANYTHING,
};
@ -334,5 +334,5 @@ const struct bpf_func_proto bpf_task_storage_delete_proto = {
.ret_type = RET_INTEGER,
.arg1_type = ARG_CONST_MAP_PTR,
.arg2_type = ARG_PTR_TO_BTF_ID,
.arg2_btf_id = &btf_task_struct_ids[0],
.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};


@ -282,6 +282,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_DATASEC] = "DATASEC",
[BTF_KIND_FLOAT] = "FLOAT",
[BTF_KIND_DECL_TAG] = "DECL_TAG",
[BTF_KIND_TYPE_TAG] = "TYPE_TAG",
};
const char *btf_type_str(const struct btf_type *t)
@ -418,6 +419,7 @@ static bool btf_type_is_modifier(const struct btf_type *t)
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPE_TAG:
return true;
}
@ -1737,6 +1739,7 @@ __btf_resolve_size(const struct btf *btf, const struct btf_type *type,
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPE_TAG:
id = type->type;
type = btf_type_by_id(btf, type->type);
break;
@ -2345,6 +2348,8 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env,
const struct btf_type *t,
u32 meta_left)
{
const char *value;
if (btf_type_vlen(t)) {
btf_verifier_log_type(env, t, "vlen != 0");
return -EINVAL;
@ -2360,7 +2365,7 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env,
return -EINVAL;
}
/* typedef type must have a valid name, and other ref types,
/* typedef/type_tag type must have a valid name, and other ref types,
* volatile, const, restrict, should have a null name.
*/
if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF) {
@ -2369,6 +2374,12 @@ static int btf_ref_type_check_meta(struct btf_verifier_env *env,
btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL;
}
} else if (BTF_INFO_KIND(t->info) == BTF_KIND_TYPE_TAG) {
value = btf_name_by_offset(env->btf, t->name_off);
if (!value || !value[0]) {
btf_verifier_log_type(env, t, "Invalid name");
return -EINVAL;
}
} else {
if (t->name_off) {
btf_verifier_log_type(env, t, "Invalid name");
@ -4059,6 +4070,7 @@ static const struct btf_kind_operations * const kind_ops[NR_BTF_KINDS] = {
[BTF_KIND_DATASEC] = &datasec_ops,
[BTF_KIND_FLOAT] = &float_ops,
[BTF_KIND_DECL_TAG] = &decl_tag_ops,
[BTF_KIND_TYPE_TAG] = &modifier_ops,
};
static s32 btf_check_meta(struct btf_verifier_env *env,
@ -6342,7 +6354,10 @@ const struct bpf_func_proto bpf_btf_find_by_name_kind_proto = {
.arg4_type = ARG_ANYTHING,
};
BTF_ID_LIST_GLOBAL_SINGLE(btf_task_struct_ids, struct, task_struct)
BTF_ID_LIST_GLOBAL(btf_tracing_ids, MAX_BTF_TRACING_TYPE)
#define BTF_TRACING_TYPE(name, type) BTF_ID(struct, type)
BTF_TRACING_TYPE_xxx
#undef BTF_TRACING_TYPE
/* BTF ID set registration API for modules */


@ -0,0 +1,65 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021 Facebook
*/
#ifndef __MMAP_UNLOCK_WORK_H__
#define __MMAP_UNLOCK_WORK_H__
#include <linux/irq_work.h>
/* irq_work to run mmap_read_unlock() when irqs are disabled */
struct mmap_unlock_irq_work {
struct irq_work irq_work;
struct mm_struct *mm;
};
DECLARE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
/*
 * We cannot do mmap_read_unlock() when the irq is disabled, because of
 * the risk of deadlocking with rq_lock. To look up a vma when irqs are
 * disabled, we need to run mmap_read_unlock() in irq_work. We use a
 * percpu variable to do the irq_work. If the irq_work is already used
 * by another lookup, we fall back.
 */
static inline bool bpf_mmap_unlock_get_irq_work(struct mmap_unlock_irq_work **work_ptr)
{
struct mmap_unlock_irq_work *work = NULL;
bool irq_work_busy = false;
if (irqs_disabled()) {
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
work = this_cpu_ptr(&mmap_unlock_work);
if (irq_work_is_busy(&work->irq_work)) {
/* cannot queue more up_read, fallback */
irq_work_busy = true;
}
} else {
/*
* PREEMPT_RT does not allow to trylock mmap sem in
* interrupt disabled context. Force the fallback code.
*/
irq_work_busy = true;
}
}
*work_ptr = work;
return irq_work_busy;
}
static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
{
if (!work) {
mmap_read_unlock(mm);
} else {
work->mm = mm;
/* The lock will be released once we're out of interrupt
* context. Tell lockdep that we've released it now so
* it doesn't complain that we forgot to release it.
*/
rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
irq_work_queue(&work->irq_work);
}
}
#endif /* __MMAP_UNLOCK_WORK_H__ */
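
The intended call pattern, condensed from bpf_find_vma() in
kernel/bpf/task_iter.c below (sketch only):

static int walk_vma_example(struct mm_struct *mm)
{
	struct mmap_unlock_irq_work *work = NULL;
	bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);

	if (irq_work_busy || !mmap_read_trylock(mm))
		return -EBUSY;		/* cannot take the lock safely */
	/* ... find_vma() and friends under the read lock ... */
	bpf_mmap_unlock_mm(work, mm);	/* unlocks now, or defers to irq_work */
	return 0;
}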


@ -7,10 +7,10 @@
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/irq_work.h>
#include <linux/btf_ids.h>
#include <linux/buildid.h>
#include "percpu_freelist.h"
#include "mmap_unlock_work.h"
#define STACK_CREATE_FLAG_MASK \
(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY | \
@ -31,25 +31,6 @@ struct bpf_stack_map {
struct stack_map_bucket *buckets[];
};
/* irq_work to run up_read() for build_id lookup in nmi context */
struct stack_map_irq_work {
struct irq_work irq_work;
struct mm_struct *mm;
};
static void do_up_read(struct irq_work *entry)
{
struct stack_map_irq_work *work;
if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
return;
work = container_of(entry, struct stack_map_irq_work, irq_work);
mmap_read_unlock_non_owner(work->mm);
}
static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);
static inline bool stack_map_use_build_id(struct bpf_map *map)
{
return (map->map_flags & BPF_F_STACK_BUILD_ID);
@ -149,35 +130,13 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
u64 *ips, u32 trace_nr, bool user)
{
int i;
struct mmap_unlock_irq_work *work = NULL;
bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
struct vm_area_struct *vma;
bool irq_work_busy = false;
struct stack_map_irq_work *work = NULL;
if (irqs_disabled()) {
if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
work = this_cpu_ptr(&up_read_work);
if (irq_work_is_busy(&work->irq_work)) {
/* cannot queue more up_read, fallback */
irq_work_busy = true;
}
} else {
/*
* PREEMPT_RT does not allow to trylock mmap sem in
* interrupt disabled context. Force the fallback code.
*/
irq_work_busy = true;
}
}
/*
* We cannot do up_read() when the irq is disabled, because of
* risk to deadlock with rq_lock. To do build_id lookup when the
* irqs are disabled, we need to run up_read() in irq_work. We use
* a percpu variable to do the irq_work. If the irq_work is
* already used by another lookup, we fall back to report ips.
*
* Same fallback is used for kernel stack (!user) on a stackmap
* with build_id.
/* If the irq_work is in use, fall back to report ips. Same
* fallback is used for kernel stack (!user) on a stackmap with
* build_id.
*/
if (!user || !current || !current->mm || irq_work_busy ||
!mmap_read_trylock(current->mm)) {
@ -203,19 +162,7 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
- vma->vm_start;
id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
}
if (!work) {
mmap_read_unlock(current->mm);
} else {
work->mm = current->mm;
/* The lock will be released once we're out of interrupt
* context. Tell lockdep that we've released it now so
* it doesn't complain that we forgot to release it.
*/
rwsem_release(&current->mm->mmap_lock.dep_map, _RET_IP_);
irq_work_queue(&work->irq_work);
}
bpf_mmap_unlock_mm(work, current->mm);
}
static struct perf_callchain_entry *
@ -542,7 +489,7 @@ const struct bpf_func_proto bpf_get_task_stack_proto = {
.gpl_only = false,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &btf_task_struct_ids[0],
.arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
.arg2_type = ARG_PTR_TO_UNINIT_MEM,
.arg3_type = ARG_CONST_SIZE_OR_ZERO,
.arg4_type = ARG_ANYTHING,
@ -719,16 +666,3 @@ const struct bpf_map_ops stack_trace_map_ops = {
.map_btf_name = "bpf_stack_map",
.map_btf_id = &stack_trace_map_btf_id,
};
static int __init stack_map_init(void)
{
int cpu;
struct stack_map_irq_work *work;
for_each_possible_cpu(cpu) {
work = per_cpu_ptr(&up_read_work, cpu);
init_irq_work(&work->irq_work, do_up_read);
}
return 0;
}
subsys_initcall(stack_map_init);


@ -8,6 +8,7 @@
#include <linux/fdtable.h>
#include <linux/filter.h>
#include <linux/btf_ids.h>
#include "mmap_unlock_work.h"
struct bpf_iter_seq_task_common {
struct pid_namespace *ns;
@ -524,10 +525,6 @@ static const struct seq_operations task_vma_seq_ops = {
.show = task_vma_seq_show,
};
BTF_ID_LIST(btf_task_file_ids)
BTF_ID(struct, file)
BTF_ID(struct, vm_area_struct)
static const struct bpf_iter_seq_info task_seq_info = {
.seq_ops = &task_seq_ops,
.init_seq_private = init_seq_pidns,
@ -586,23 +583,88 @@ static struct bpf_iter_reg task_vma_reg_info = {
.seq_info = &task_vma_seq_info,
};
BPF_CALL_5(bpf_find_vma, struct task_struct *, task, u64, start,
bpf_callback_t, callback_fn, void *, callback_ctx, u64, flags)
{
struct mmap_unlock_irq_work *work = NULL;
struct vm_area_struct *vma;
bool irq_work_busy = false;
struct mm_struct *mm;
int ret = -ENOENT;
if (flags)
return -EINVAL;
if (!task)
return -ENOENT;
mm = task->mm;
if (!mm)
return -ENOENT;
irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
if (irq_work_busy || !mmap_read_trylock(mm))
return -EBUSY;
vma = find_vma(mm, start);
if (vma && vma->vm_start <= start && vma->vm_end > start) {
callback_fn((u64)(long)task, (u64)(long)vma,
(u64)(long)callback_ctx, 0, 0);
ret = 0;
}
bpf_mmap_unlock_mm(work, mm);
return ret;
}
const struct bpf_func_proto bpf_find_vma_proto = {
.func = bpf_find_vma,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_PTR_TO_FUNC,
.arg4_type = ARG_PTR_TO_STACK_OR_NULL,
.arg5_type = ARG_ANYTHING,
};
DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
static void do_mmap_read_unlock(struct irq_work *entry)
{
struct mmap_unlock_irq_work *work;
if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
return;
work = container_of(entry, struct mmap_unlock_irq_work, irq_work);
mmap_read_unlock_non_owner(work->mm);
}
static int __init task_iter_init(void)
{
int ret;
struct mmap_unlock_irq_work *work;
int ret, cpu;
task_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0];
for_each_possible_cpu(cpu) {
work = per_cpu_ptr(&mmap_unlock_work, cpu);
init_irq_work(&work->irq_work, do_mmap_read_unlock);
}
task_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
ret = bpf_iter_reg_target(&task_reg_info);
if (ret)
return ret;
task_file_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0];
task_file_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[0];
task_file_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
task_file_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_FILE];
ret = bpf_iter_reg_target(&task_file_reg_info);
if (ret)
return ret;
task_vma_reg_info.ctx_arg_info[0].btf_id = btf_task_struct_ids[0];
task_vma_reg_info.ctx_arg_info[1].btf_id = btf_task_file_ids[1];
task_vma_reg_info.ctx_arg_info[0].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_TASK];
task_vma_reg_info.ctx_arg_info[1].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
return bpf_iter_reg_target(&task_vma_reg_info);
}
late_initcall(task_iter_init);


@ -6114,6 +6114,33 @@ static int set_timer_callback_state(struct bpf_verifier_env *env,
return 0;
}
static int set_find_vma_callback_state(struct bpf_verifier_env *env,
struct bpf_func_state *caller,
struct bpf_func_state *callee,
int insn_idx)
{
/* bpf_find_vma(struct task_struct *task, u64 addr,
* void *callback_fn, void *callback_ctx, u64 flags)
* (callback_fn)(struct task_struct *task,
* struct vm_area_struct *vma, void *callback_ctx);
*/
callee->regs[BPF_REG_1] = caller->regs[BPF_REG_1];
callee->regs[BPF_REG_2].type = PTR_TO_BTF_ID;
__mark_reg_known_zero(&callee->regs[BPF_REG_2]);
callee->regs[BPF_REG_2].btf = btf_vmlinux;
callee->regs[BPF_REG_2].btf_id = btf_tracing_ids[BTF_TRACING_TYPE_VMA];
/* pointer to stack or null */
callee->regs[BPF_REG_3] = caller->regs[BPF_REG_4];
/* unused */
__mark_reg_not_init(env, &callee->regs[BPF_REG_4]);
__mark_reg_not_init(env, &callee->regs[BPF_REG_5]);
callee->in_callback_fn = true;
return 0;
}
static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
{
struct bpf_verifier_state *state = env->cur_state;
@ -6471,6 +6498,13 @@ static int check_helper_call(struct bpf_verifier_env *env, struct bpf_insn *insn
return -EINVAL;
}
if (func_id == BPF_FUNC_find_vma) {
err = __check_func_call(env, insn, insn_idx_p, meta.subprogno,
set_find_vma_callback_state);
if (err < 0)
return -EINVAL;
}
if (func_id == BPF_FUNC_snprintf) {
err = check_bpf_snprintf_call(env, regs);
if (err < 0)


@ -764,7 +764,7 @@ const struct bpf_func_proto bpf_get_current_task_btf_proto = {
.func = bpf_get_current_task_btf,
.gpl_only = true,
.ret_type = RET_PTR_TO_BTF_ID,
.ret_btf_id = &btf_task_struct_ids[0],
.ret_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
};
BPF_CALL_1(bpf_task_pt_regs, struct task_struct *, task)
@ -779,7 +779,7 @@ const struct bpf_func_proto bpf_task_pt_regs_proto = {
.func = bpf_task_pt_regs,
.gpl_only = true,
.arg1_type = ARG_PTR_TO_BTF_ID,
.arg1_btf_id = &btf_task_struct_ids[0],
.arg1_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
.ret_type = RET_PTR_TO_BTF_ID,
.ret_btf_id = &bpf_task_pt_regs_ids[0],
};
@ -1208,6 +1208,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
return &bpf_get_func_ip_proto_tracing;
case BPF_FUNC_get_branch_snapshot:
return &bpf_get_branch_snapshot_proto;
case BPF_FUNC_find_vma:
return &bpf_find_vma_proto;
case BPF_FUNC_trace_vprintk:
return bpf_get_trace_vprintk_proto();
default:


@ -10539,6 +10539,7 @@ static bool sk_lookup_is_valid_access(int off, int size,
case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]):
case bpf_ctx_range(struct bpf_sk_lookup, remote_port):
case bpf_ctx_range(struct bpf_sk_lookup, local_port):
case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex):
bpf_ctx_record_field_size(info, sizeof(__u32));
return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32));
@ -10628,6 +10629,12 @@ static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
bpf_target_off(struct bpf_sk_lookup_kern,
dport, 2, target_size));
break;
case offsetof(struct bpf_sk_lookup, ingress_ifindex):
*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
bpf_target_off(struct bpf_sk_lookup_kern,
ingress_ifindex, 4, target_size));
break;
}
return insn - insn_buf;
@ -10652,14 +10659,10 @@ void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
}
#ifdef CONFIG_DEBUG_INFO_BTF
BTF_ID_LIST_GLOBAL(btf_sock_ids)
BTF_ID_LIST_GLOBAL(btf_sock_ids, MAX_BTF_SOCK_TYPE)
#define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type)
BTF_SOCK_TYPE_xxx
#undef BTF_SOCK_TYPE
#else
u32 btf_sock_ids[MAX_BTF_SOCK_TYPE];
#endif
BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
{


@ -307,7 +307,7 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net,
struct inet_hashinfo *hashinfo,
struct sk_buff *skb, int doff,
__be32 saddr, __be16 sport,
__be32 daddr, u16 hnum)
__be32 daddr, u16 hnum, const int dif)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@ -315,8 +315,8 @@ static inline struct sock *inet_lookup_run_bpf(struct net *net,
if (hashinfo != &tcp_hashinfo)
return NULL; /* only TCP is supported */
no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP,
saddr, sport, daddr, hnum, &sk);
no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_TCP, saddr, sport,
daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
@ -340,7 +340,7 @@ struct sock *__inet_lookup_listener(struct net *net,
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
result = inet_lookup_run_bpf(net, hashinfo, skb, doff,
saddr, sport, daddr, hnum);
saddr, sport, daddr, hnum, dif);
if (result)
goto done;
}


@ -459,7 +459,7 @@ static struct sock *udp4_lookup_run_bpf(struct net *net,
struct udp_table *udptable,
struct sk_buff *skb,
__be32 saddr, __be16 sport,
__be32 daddr, u16 hnum)
__be32 daddr, u16 hnum, const int dif)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@ -467,8 +467,8 @@ static struct sock *udp4_lookup_run_bpf(struct net *net,
if (udptable != &udp_table)
return NULL; /* only UDP is supported */
no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP,
saddr, sport, daddr, hnum, &sk);
no_reuseport = bpf_sk_lookup_run_v4(net, IPPROTO_UDP, saddr, sport,
daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
@ -504,7 +504,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
sk = udp4_lookup_run_bpf(net, udptable, skb,
saddr, sport, daddr, hnum);
saddr, sport, daddr, hnum, dif);
if (sk) {
result = sk;
goto done;


@ -165,7 +165,7 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net,
const struct in6_addr *saddr,
const __be16 sport,
const struct in6_addr *daddr,
const u16 hnum)
const u16 hnum, const int dif)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@ -173,8 +173,8 @@ static inline struct sock *inet6_lookup_run_bpf(struct net *net,
if (hashinfo != &tcp_hashinfo)
return NULL; /* only TCP is supported */
no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP,
saddr, sport, daddr, hnum, &sk);
no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_TCP, saddr, sport,
daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
@ -198,7 +198,7 @@ struct sock *inet6_lookup_listener(struct net *net,
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
result = inet6_lookup_run_bpf(net, hashinfo, skb, doff,
saddr, sport, daddr, hnum);
saddr, sport, daddr, hnum, dif);
if (result)
goto done;
}


@ -195,7 +195,7 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net,
const struct in6_addr *saddr,
__be16 sport,
const struct in6_addr *daddr,
u16 hnum)
u16 hnum, const int dif)
{
struct sock *sk, *reuse_sk;
bool no_reuseport;
@ -203,8 +203,8 @@ static inline struct sock *udp6_lookup_run_bpf(struct net *net,
if (udptable != &udp_table)
return NULL; /* only UDP is supported */
no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP,
saddr, sport, daddr, hnum, &sk);
no_reuseport = bpf_sk_lookup_run_v6(net, IPPROTO_UDP, saddr, sport,
daddr, hnum, dif, &sk);
if (no_reuseport || IS_ERR_OR_NULL(sk))
return sk;
@ -240,7 +240,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
/* Lookup redirect from BPF */
if (static_branch_unlikely(&bpf_sk_lookup_enabled)) {
sk = udp6_lookup_run_bpf(net, udptable, skb,
saddr, sport, daddr, hnum);
saddr, sport, daddr, hnum, dif);
if (sk) {
result = sk;
goto done;


@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
*.d
/bootstrap/
/bpftool


@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
include ../../../scripts/Makefile.include
include ../../../scripts/utilities.mak
INSTALL ?= install
RM ?= rm -f


@ -13,7 +13,7 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **btf** *COMMAND*
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | {**-d** | **--debug** } |
{ **-B** | **--base-btf** } }
*COMMANDS* := { **dump** | **help** }


@ -13,7 +13,7 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **cgroup** *COMMAND*
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
{ **-f** | **--bpffs** } }
*COMMANDS* :=
{ **show** | **list** | **tree** | **attach** | **detach** | **help** }
@ -30,9 +30,9 @@ CGROUP COMMANDS
| *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* }
| *ATTACH_TYPE* := { **ingress** | **egress** | **sock_create** | **sock_ops** | **device** |
| **bind4** | **bind6** | **post_bind4** | **post_bind6** | **connect4** | **connect6** |
| **getpeername4** | **getpeername6** | **getsockname4** | **getsockname6** | **sendmsg4** |
| **sendmsg6** | **recvmsg4** | **recvmsg6** | **sysctl** | **getsockopt** | **setsockopt** |
| **sock_release** }
| *ATTACH_FLAGS* := { **multi** | **override** }
DESCRIPTION
@ -98,9 +98,9 @@ DESCRIPTION
**sendmsg6** call to sendto(2), sendmsg(2), sendmmsg(2) for an
unconnected udp6 socket (since 4.18);
**recvmsg4** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
an unconnected udp4 socket (since 5.2);
**recvmsg6** call to recvfrom(2), recvmsg(2), recvmmsg(2) for
an unconnected udp6 socket (since 5.2);
**sysctl** sysctl access (since 5.2);
**getsockopt** call to getsockopt (since 5.3);
**setsockopt** call to setsockopt (since 5.3);


@ -13,7 +13,7 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **gen** *COMMAND*
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
{ **-L** | **--use-loader** } }
*COMMAND* := { **object** | **skeleton** | **help** }


@ -13,7 +13,7 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **link** *COMMAND*
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
{ **-f** | **--bpffs** } | { **-n** | **--nomount** } }
*COMMANDS* := { **show** | **list** | **pin** | **help** }


@ -13,11 +13,11 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **map** *COMMAND*
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
{ **-f** | **--bpffs** } | { **-n** | **--nomount** } }
*COMMANDS* :=
{ **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext**
| **delete** | **pin** | **help** }
{ **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** |
**delete** | **pin** | **help** }
MAP COMMANDS
=============
@ -52,7 +52,7 @@ MAP COMMANDS
| | **devmap** | **devmap_hash** | **sockmap** | **cpumap** | **xskmap** | **sockhash**
| | **cgroup_storage** | **reuseport_sockarray** | **percpu_cgroup_storage**
| | **queue** | **stack** | **sk_storage** | **struct_ops** | **ringbuf** | **inode_storage**
| **task_storage** }
| | **task_storage** | **bloom_filter** }
DESCRIPTION
===========


@ -31,44 +31,44 @@ NET COMMANDS
DESCRIPTION
===========
**bpftool net { show | list }** [ **dev** *NAME* ]
List bpf program attachments in the kernel networking subsystem.
Currently, only device driver xdp attachments and tc filter
classification/action attachments are implemented, i.e., for
program types **BPF_PROG_TYPE_SCHED_CLS**,
**BPF_PROG_TYPE_SCHED_ACT** and **BPF_PROG_TYPE_XDP**.
For programs attached to a particular cgroup, e.g.,
**BPF_PROG_TYPE_CGROUP_SKB**, **BPF_PROG_TYPE_CGROUP_SOCK**,
**BPF_PROG_TYPE_SOCK_OPS** and **BPF_PROG_TYPE_CGROUP_SOCK_ADDR**,
users can use **bpftool cgroup** to dump cgroup attachments.
For sk_{filter, skb, msg, reuseport} and lwt/seg6
bpf programs, users should consult other tools, e.g., iproute2.
The current output will start with all xdp program attachments, followed by
all tc class/qdisc bpf program attachments. Both xdp programs and
tc programs are ordered based on ifindex number. If multiple bpf
programs are attached to the same networking device through **tc filter**,
the order will be first all bpf programs attached to tc classes, then
all bpf programs attached to non clsact qdiscs, and finally all
bpf programs attached to root and clsact qdisc.
**bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ]
Attach bpf program *PROG* to network interface *NAME* with
type specified by *ATTACH_TYPE*. A previously attached bpf program
can be replaced by using the **overwrite** option.
Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
*ATTACH_TYPE* can be one of:
**xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it;
**xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as skb;
**xdpdrv** - Native XDP. runs earliest point in driver's receive path;
**xdpoffload** - Offload XDP. runs directly on NIC on each packet reception;
**bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME*
Detach bpf program attached to network interface *NAME* with
type specified by *ATTACH_TYPE*. To detach a bpf program, the same
*ATTACH_TYPE* previously used for attach must be specified.
Currently, only XDP-related modes are supported for *ATTACH_TYPE*.
**bpftool net help**
Print short help message.


@ -13,12 +13,12 @@ SYNOPSIS
**bpftool** [*OPTIONS*] **prog** *COMMAND*
*OPTIONS* := { { **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } |
{ **-f** | **--bpffs** } | { **-m** | **--mapcompat** } | { **-n** | **--nomount** } |
{ **-L** | **--use-loader** } }
*COMMANDS* :=
{ **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load**
| **loadall** | **help** }
{ **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** |
**loadall** | **help** }
PROG COMMANDS
=============


@ -19,14 +19,14 @@ SYNOPSIS
*OBJECT* := { **map** | **program** | **cgroup** | **perf** | **net** | **feature** }
*OPTIONS* := { { **-V** | **--version** } |
{ **-j** | **--json** } [{ **-p** | **--pretty** }] | { **-d** | **--debug** } }
*MAP-COMMANDS* :=
{ **show** | **list** | **create** | **dump** | **update** | **lookup** | **getnext** |
**delete** | **pin** | **event_pipe** | **help** }
*PROG-COMMANDS* := { **show** | **list** | **dump jited** | **dump xlated** | **pin** |
**load** | **attach** | **detach** | **help** }
*CGROUP-COMMANDS* := { **show** | **list** | **attach** | **detach** | **help** }


@ -20,3 +20,12 @@
Print all logs available, even debug-level information. This includes
logs from libbpf as well as from the verifier, when attempting to
load programs.
-l, --legacy
Use legacy libbpf mode which has more relaxed BPF program
requirements. By default, bpftool has stricter requirements
about section names, changes the pinning logic and doesn't support
some of the older non-BTF map declarations.
See https://github.com/libbpf/libbpf/wiki/Libbpf:-the-road-to-v1.0
for details.
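For example (object file and pin path are illustrative):
bpftool --legacy prog load legacy_prog.o /sys/fs/bpf/legacy_prog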


@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
include ../../scripts/Makefile.include
include ../../scripts/utilities.mak
ifeq ($(srctree),)
srctree := $(patsubst %/,%,$(dir $(CURDIR)))
@ -187,7 +186,8 @@ $(OUTPUT)%.bpf.o: skeleton/%.bpf.c $(OUTPUT)vmlinux.h $(LIBBPF_BOOTSTRAP)
-I$(if $(OUTPUT),$(OUTPUT),.) \
-I$(srctree)/tools/include/uapi/ \
-I$(LIBBPF_BOOTSTRAP_INCLUDE) \
-g -O2 -Wall -target bpf -c $< -o $@ && $(LLVM_STRIP) -g $@
-g -O2 -Wall -target bpf -c $< -o $@
$(Q)$(LLVM_STRIP) -g $@
$(OUTPUT)%.skel.h: $(OUTPUT)%.bpf.o $(BPFTOOL_BOOTSTRAP)
$(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) gen skeleton $< > $@
@ -202,10 +202,10 @@ endif
CFLAGS += $(if $(BUILD_BPF_SKELS),,-DBPFTOOL_WITHOUT_SKELETONS)
$(BOOTSTRAP_OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD -o $@ $<
$(QUIET_CC)$(HOSTCC) $(CFLAGS) -c -MMD $< -o $@
$(OUTPUT)disasm.o: $(srctree)/kernel/bpf/disasm.c
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@
$(OUTPUT)feature.o:
ifneq ($(feature-zlib), 1)
@ -213,19 +213,18 @@ ifneq ($(feature-zlib), 1)
endif
$(BPFTOOL_BOOTSTRAP): $(BOOTSTRAP_OBJS) $(LIBBPF_BOOTSTRAP)
$(QUIET_LINK)$(HOSTCC) $(CFLAGS) $(LDFLAGS) -o $@ $(BOOTSTRAP_OBJS) \
$(LIBS_BOOTSTRAP)
$(QUIET_LINK)$(HOSTCC) $(CFLAGS) $(LDFLAGS) $(BOOTSTRAP_OBJS) $(LIBS_BOOTSTRAP) -o $@
$(OUTPUT)bpftool: $(OBJS) $(LIBBPF)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS)
$(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) $(LIBS) -o $@
$(BOOTSTRAP_OUTPUT)%.o: %.c $(LIBBPF_BOOTSTRAP_INTERNAL_HDRS) | $(BOOTSTRAP_OUTPUT)
$(QUIET_CC)$(HOSTCC) \
$(subst -I$(LIBBPF_INCLUDE),-I$(LIBBPF_BOOTSTRAP_INCLUDE),$(CFLAGS)) \
-c -MMD -o $@ $<
-c -MMD $< -o $@
$(OUTPUT)%.o: %.c
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD -o $@ $<
$(QUIET_CC)$(CC) $(CFLAGS) -c -MMD $< -o $@
feature-detect-clean:
$(call QUIET_CLEAN, feature-detect)


@ -261,7 +261,7 @@ _bpftool()
# Deal with options
if [[ ${words[cword]} == -* ]]; then
local c='--version --json --pretty --bpffs --mapcompat --debug \
--use-loader --base-btf'
--use-loader --base-btf --legacy'
COMPREPLY=( $( compgen -W "$c" -- "$cur" ) )
return 0
fi
@ -710,7 +710,8 @@ _bpftool()
hash_of_maps devmap devmap_hash sockmap cpumap \
xskmap sockhash cgroup_storage reuseport_sockarray \
percpu_cgroup_storage queue stack sk_storage \
struct_ops inode_storage task_storage ringbuf'
struct_ops ringbuf inode_storage task_storage \
bloom_filter'
COMPREPLY=( $( compgen -W "$BPFTOOL_MAP_CREATE_TYPES" -- "$cur" ) )
return 0
;;


@ -39,6 +39,7 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = {
[BTF_KIND_DATASEC] = "DATASEC",
[BTF_KIND_FLOAT] = "FLOAT",
[BTF_KIND_DECL_TAG] = "DECL_TAG",
[BTF_KIND_TYPE_TAG] = "TYPE_TAG",
};
struct btf_attach_point {
@ -142,6 +143,7 @@ static int dump_btf_type(const struct btf *btf, __u32 id,
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPEDEF:
case BTF_KIND_TYPE_TAG:
if (json_output)
jsonw_uint_field(w, "type_id", t->type);
else
@ -418,9 +420,10 @@ static int dump_btf_c(const struct btf *btf,
struct btf_dump *d;
int err = 0, i;
d = btf_dump__new(btf, NULL, NULL, btf_dump_printf);
if (IS_ERR(d))
return PTR_ERR(d);
d = btf_dump__new(btf, btf_dump_printf, NULL, NULL);
err = libbpf_get_error(d);
if (err)
return err;
printf("#ifndef __VMLINUX_H__\n");
printf("#define __VMLINUX_H__\n");
@ -547,8 +550,8 @@ static int do_dump(int argc, char **argv)
}
btf = btf__parse_split(*argv, base ?: base_btf);
if (IS_ERR(btf)) {
err = -PTR_ERR(btf);
err = libbpf_get_error(btf);
if (err) {
btf = NULL;
p_err("failed to load BTF from %s: %s",
*argv, strerror(err));


@ -32,14 +32,16 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
const struct btf_type *func_proto,
__u32 prog_id)
{
struct bpf_prog_info_linear *prog_info = NULL;
const struct btf_type *func_type;
int prog_fd = -1, func_sig_len;
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
const char *prog_name = NULL;
struct bpf_func_info *finfo;
struct btf *prog_btf = NULL;
struct bpf_prog_info *info;
int prog_fd, func_sig_len;
struct bpf_func_info finfo;
__u32 finfo_rec_size;
char prog_str[1024];
int err;
/* Get the ptr's func_proto */
func_sig_len = btf_dump_func(d->btf, prog_str, func_proto, NULL, 0,
@ -52,25 +54,30 @@ static int dump_prog_id_as_func_ptr(const struct btf_dumper *d,
/* Get the bpf_prog's name. Obtain from func_info. */
prog_fd = bpf_prog_get_fd_by_id(prog_id);
if (prog_fd == -1)
if (prog_fd < 0)
goto print;
prog_info = bpf_program__get_prog_info_linear(prog_fd,
1UL << BPF_PROG_INFO_FUNC_INFO);
close(prog_fd);
if (IS_ERR(prog_info)) {
prog_info = NULL;
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
if (err)
goto print;
}
info = &prog_info->info;
if (!info->btf_id || !info->nr_func_info)
if (!info.btf_id || !info.nr_func_info)
goto print;
prog_btf = btf__load_from_kernel_by_id(info->btf_id);
finfo_rec_size = info.func_info_rec_size;
memset(&info, 0, sizeof(info));
info.nr_func_info = 1;
info.func_info_rec_size = finfo_rec_size;
info.func_info = ptr_to_u64(&finfo);
err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
if (err)
goto print;
prog_btf = btf__load_from_kernel_by_id(info.btf_id);
if (libbpf_get_error(prog_btf))
goto print;
finfo = u64_to_ptr(info->func_info);
func_type = btf__type_by_id(prog_btf, finfo->type_id);
func_type = btf__type_by_id(prog_btf, finfo.type_id);
if (!func_type || !btf_is_func(func_type))
goto print;
@ -92,7 +99,8 @@ print:
prog_str[sizeof(prog_str) - 1] = '\0';
jsonw_string(d->jw, prog_str);
btf__free(prog_btf);
free(prog_info);
if (prog_fd >= 0)
close(prog_fd);
return 0;
}


@ -74,6 +74,7 @@ const char * const attach_type_name[__MAX_BPF_ATTACH_TYPE] = {
[BPF_XDP] = "xdp",
[BPF_SK_REUSEPORT_SELECT] = "sk_skb_reuseport_select",
[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_skb_reuseport_select_or_migrate",
[BPF_PERF_EVENT] = "perf_event",
};
void p_err(const char *fmt, ...)


@ -467,7 +467,7 @@ static bool probe_bpf_syscall(const char *define_prefix)
{
bool res;
bpf_load_program(BPF_PROG_TYPE_UNSPEC, NULL, 0, NULL, 0, NULL, 0);
bpf_prog_load(BPF_PROG_TYPE_UNSPEC, NULL, NULL, NULL, 0, NULL);
res = (errno != ENOSYS);
print_bool_feature("have_bpf_syscall",


@ -218,9 +218,10 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
char sec_ident[256], map_ident[256];
int i, err = 0;
d = btf_dump__new(btf, NULL, NULL, codegen_btf_dump_printf);
if (IS_ERR(d))
return PTR_ERR(d);
d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
err = libbpf_get_error(d);
if (err)
return err;
bpf_object__for_each_map(map, obj) {
/* only generate definitions for memory-mapped internal maps */
@ -719,10 +720,11 @@ static int do_skeleton(int argc, char **argv)
get_obj_name(obj_name, file);
opts.object_name = obj_name;
obj = bpf_object__open_mem(obj_data, file_sz, &opts);
if (IS_ERR(obj)) {
err = libbpf_get_error(obj);
if (err) {
char err_buf[256];
libbpf_strerror(PTR_ERR(obj), err_buf, sizeof(err_buf));
libbpf_strerror(err, err_buf, sizeof(err_buf));
p_err("failed to open BPF object file: %s", err_buf);
obj = NULL;
goto out;


@ -46,7 +46,8 @@ static int do_pin(int argc, char **argv)
}
obj = bpf_object__open(objfile);
if (IS_ERR(obj)) {
err = libbpf_get_error(obj);
if (err) {
p_err("can't open objfile %s", objfile);
goto close_map_fd;
}
@ -64,8 +65,8 @@ static int do_pin(int argc, char **argv)
}
link = bpf_program__attach_iter(prog, &iter_opts);
if (IS_ERR(link)) {
err = PTR_ERR(link);
err = libbpf_get_error(link);
if (err) {
p_err("attach_iter failed for program %s",
bpf_program__name(prog));
goto close_obj;


@ -31,6 +31,7 @@ bool block_mount;
bool verifier_logs;
bool relaxed_maps;
bool use_loader;
bool legacy_libbpf;
struct btf *base_btf;
struct hashmap *refs_table;
@ -396,6 +397,7 @@ int main(int argc, char **argv)
{ "debug", no_argument, NULL, 'd' },
{ "use-loader", no_argument, NULL, 'L' },
{ "base-btf", required_argument, NULL, 'B' },
{ "legacy", no_argument, NULL, 'l' },
{ 0 }
};
int opt, ret;
@ -408,7 +410,7 @@ int main(int argc, char **argv)
bin_name = argv[0];
opterr = 0;
while ((opt = getopt_long(argc, argv, "VhpjfLmndB:",
while ((opt = getopt_long(argc, argv, "VhpjfLmndB:l",
options, NULL)) >= 0) {
switch (opt) {
case 'V':
@ -454,6 +456,9 @@ int main(int argc, char **argv)
case 'L':
use_loader = true;
break;
case 'l':
legacy_libbpf = true;
break;
default:
p_err("unrecognized option '%s'", argv[optind - 1]);
if (json_output)
@ -463,6 +468,12 @@ int main(int argc, char **argv)
}
}
if (!legacy_libbpf) {
ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
if (ret)
p_err("failed to enable libbpf strict mode: %d", ret);
}
argc -= optind;
argv += optind;
if (argc < 0)


@ -57,7 +57,7 @@ static inline void *u64_to_ptr(__u64 ptr)
#define HELP_SPEC_PROGRAM \
"PROG := { id PROG_ID | pinned FILE | tag PROG_TAG | name PROG_NAME }"
#define HELP_SPEC_OPTIONS \
"OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug}"
"OPTIONS := { {-j|--json} [{-p|--pretty}] | {-d|--debug} | {-l|--legacy}"
#define HELP_SPEC_MAP \
"MAP := { id MAP_ID | pinned FILE | name MAP_NAME }"
#define HELP_SPEC_LINK \
@ -90,6 +90,7 @@ extern bool block_mount;
extern bool verifier_logs;
extern bool relaxed_maps;
extern bool use_loader;
extern bool legacy_libbpf;
extern struct btf *base_btf;
extern struct hashmap *refs_table;


@ -53,6 +53,7 @@ const char * const map_type_name[] = {
[BPF_MAP_TYPE_RINGBUF] = "ringbuf",
[BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
[BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
[BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
};
const size_t map_type_name_size = ARRAY_SIZE(map_type_name);
@ -811,7 +812,7 @@ static struct btf *get_map_kv_btf(const struct bpf_map_info *info)
if (info->btf_vmlinux_value_type_id) {
if (!btf_vmlinux) {
btf_vmlinux = libbpf_find_kernel_btf();
if (IS_ERR(btf_vmlinux))
if (libbpf_get_error(btf_vmlinux))
p_err("failed to get kernel btf");
}
return btf_vmlinux;
@ -831,13 +832,13 @@ static struct btf *get_map_kv_btf(const struct bpf_map_info *info)
static void free_map_kv_btf(struct btf *btf)
{
if (!IS_ERR(btf) && btf != btf_vmlinux)
if (!libbpf_get_error(btf) && btf != btf_vmlinux)
btf__free(btf);
}
static void free_btf_vmlinux(void)
{
if (!IS_ERR(btf_vmlinux))
if (!libbpf_get_error(btf_vmlinux))
btf__free(btf_vmlinux);
}
@ -862,8 +863,8 @@ map_dump(int fd, struct bpf_map_info *info, json_writer_t *wtr,
if (wtr) {
btf = get_map_kv_btf(info);
if (IS_ERR(btf)) {
err = PTR_ERR(btf);
err = libbpf_get_error(btf);
if (err) {
goto exit_free;
}
@ -1477,7 +1478,7 @@ static int do_help(int argc, char **argv)
" devmap | devmap_hash | sockmap | cpumap | xskmap | sockhash |\n"
" cgroup_storage | reuseport_sockarray | percpu_cgroup_storage |\n"
" queue | stack | sk_storage | struct_ops | ringbuf | inode_storage |\n"
" task_storage }\n"
" task_storage | bloom_filter }\n"
" " HELP_SPEC_OPTIONS " |\n"
" {-f|--bpffs} | {-n|--nomount} }\n"
"",


@ -124,7 +124,7 @@ int do_event_pipe(int argc, char **argv)
.wakeup_events = 1,
};
struct bpf_map_info map_info = {};
struct perf_buffer_raw_opts opts = {};
LIBBPF_OPTS(perf_buffer_raw_opts, opts);
struct event_pipe_ctx ctx = {
.all_cpus = true,
.cpu = -1,
@ -190,14 +190,11 @@ int do_event_pipe(int argc, char **argv)
ctx.idx = 0;
}
opts.attr = &perf_attr;
opts.event_cb = print_bpf_output;
opts.ctx = &ctx;
opts.cpu_cnt = ctx.all_cpus ? 0 : 1;
opts.cpus = &ctx.cpu;
opts.map_keys = &ctx.idx;
pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &opts);
pb = perf_buffer__new_raw(map_fd, MMAP_PAGE_CNT, &perf_attr,
print_bpf_output, &ctx, &opts);
err = libbpf_get_error(pb);
if (err) {
p_err("failed to create perf buffer: %s (%d)",


@ -100,6 +100,76 @@ static enum bpf_attach_type parse_attach_type(const char *str)
return __MAX_BPF_ATTACH_TYPE;
}
static int prep_prog_info(struct bpf_prog_info *const info, enum dump_mode mode,
void **info_data, size_t *const info_data_sz)
{
struct bpf_prog_info holder = {};
size_t needed = 0;
void *ptr;
if (mode == DUMP_JITED) {
holder.jited_prog_len = info->jited_prog_len;
needed += info->jited_prog_len;
} else {
holder.xlated_prog_len = info->xlated_prog_len;
needed += info->xlated_prog_len;
}
holder.nr_jited_ksyms = info->nr_jited_ksyms;
needed += info->nr_jited_ksyms * sizeof(__u64);
holder.nr_jited_func_lens = info->nr_jited_func_lens;
needed += info->nr_jited_func_lens * sizeof(__u32);
holder.nr_func_info = info->nr_func_info;
holder.func_info_rec_size = info->func_info_rec_size;
needed += info->nr_func_info * info->func_info_rec_size;
holder.nr_line_info = info->nr_line_info;
holder.line_info_rec_size = info->line_info_rec_size;
needed += info->nr_line_info * info->line_info_rec_size;
holder.nr_jited_line_info = info->nr_jited_line_info;
holder.jited_line_info_rec_size = info->jited_line_info_rec_size;
needed += info->nr_jited_line_info * info->jited_line_info_rec_size;
if (needed > *info_data_sz) {
ptr = realloc(*info_data, needed);
if (!ptr)
return -1;
*info_data = ptr;
*info_data_sz = needed;
}
ptr = *info_data;
if (mode == DUMP_JITED) {
holder.jited_prog_insns = ptr_to_u64(ptr);
ptr += holder.jited_prog_len;
} else {
holder.xlated_prog_insns = ptr_to_u64(ptr);
ptr += holder.xlated_prog_len;
}
holder.jited_ksyms = ptr_to_u64(ptr);
ptr += holder.nr_jited_ksyms * sizeof(__u64);
holder.jited_func_lens = ptr_to_u64(ptr);
ptr += holder.nr_jited_func_lens * sizeof(__u32);
holder.func_info = ptr_to_u64(ptr);
ptr += holder.nr_func_info * holder.func_info_rec_size;
holder.line_info = ptr_to_u64(ptr);
ptr += holder.nr_line_info * holder.line_info_rec_size;
holder.jited_line_info = ptr_to_u64(ptr);
ptr += holder.nr_jited_line_info * holder.jited_line_info_rec_size;
*info = holder;
return 0;
}
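
prep_prog_info() is the middle step of the usual two-call
bpf_obj_get_info_by_fd() pattern; do_dump() below drives it roughly as follows
(condensed sketch, function name is illustrative):

static int dump_one_prog(int fd, enum dump_mode mode)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	size_t info_data_sz = 0;
	void *info_data = NULL;
	int err;

	/* 1st call: kernel fills in only the counts and record sizes */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	/* grow one backing buffer and point the array fields into it */
	if (!err)
		err = prep_prog_info(&info, mode, &info_data, &info_data_sz);
	/* 2nd call: kernel copies instructions, ksyms, func/line info */
	if (!err)
		err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	/* ... on success, consume info ... */
	free(info_data);
	return err;
}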
static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
{
struct timespec real_time_ts, boot_time_ts;
@ -639,8 +709,8 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
char func_sig[1024];
unsigned char *buf;
__u32 member_len;
int fd, err = -1;
ssize_t n;
int fd;
if (mode == DUMP_JITED) {
if (info->jited_prog_len == 0 || !info->jited_prog_insns) {
@ -679,7 +749,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
if (fd < 0) {
p_err("can't open file %s: %s", filepath,
strerror(errno));
return -1;
goto exit_free;
}
n = write(fd, buf, member_len);
@ -687,7 +757,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
if (n != (ssize_t)member_len) {
p_err("error writing output file: %s",
n < 0 ? strerror(errno) : "short write");
return -1;
goto exit_free;
}
if (json_output)
@ -701,7 +771,7 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
info->netns_ino,
&disasm_opt);
if (!name)
return -1;
goto exit_free;
}
if (info->nr_jited_func_lens && info->jited_func_lens) {
@ -796,23 +866,28 @@ prog_dump(struct bpf_prog_info *info, enum dump_mode mode,
kernel_syms_destroy(&dd);
}
btf__free(btf);
err = 0;
return 0;
exit_free:
btf__free(btf);
bpf_prog_linfo__free(prog_linfo);
return err;
}
static int do_dump(int argc, char **argv)
{
struct bpf_prog_info_linear *info_linear;
struct bpf_prog_info info;
__u32 info_len = sizeof(info);
size_t info_data_sz = 0;
void *info_data = NULL;
char *filepath = NULL;
bool opcodes = false;
bool visual = false;
enum dump_mode mode;
bool linum = false;
int *fds = NULL;
int nb_fds, i = 0;
int *fds = NULL;
int err = -1;
__u64 arrays;
if (is_prefix(*argv, "jited")) {
if (disasm_init())
@ -872,43 +947,44 @@ static int do_dump(int argc, char **argv)
goto exit_close;
}
if (mode == DUMP_JITED)
arrays = 1UL << BPF_PROG_INFO_JITED_INSNS;
else
arrays = 1UL << BPF_PROG_INFO_XLATED_INSNS;
arrays |= 1UL << BPF_PROG_INFO_JITED_KSYMS;
arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
if (json_output && nb_fds > 1)
jsonw_start_array(json_wtr); /* root array */
for (i = 0; i < nb_fds; i++) {
info_linear = bpf_program__get_prog_info_linear(fds[i], arrays);
if (IS_ERR_OR_NULL(info_linear)) {
memset(&info, 0, sizeof(info));
err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
break;
}
err = prep_prog_info(&info, mode, &info_data, &info_data_sz);
if (err) {
p_err("can't grow prog info_data");
break;
}
err = bpf_obj_get_info_by_fd(fds[i], &info, &info_len);
if (err) {
p_err("can't get prog info: %s", strerror(errno));
break;
}
if (json_output && nb_fds > 1) {
jsonw_start_object(json_wtr); /* prog object */
print_prog_header_json(&info_linear->info);
print_prog_header_json(&info);
jsonw_name(json_wtr, "insns");
} else if (nb_fds > 1) {
print_prog_header_plain(&info_linear->info);
print_prog_header_plain(&info);
}
err = prog_dump(&info_linear->info, mode, filepath, opcodes,
visual, linum);
err = prog_dump(&info, mode, filepath, opcodes, visual, linum);
if (json_output && nb_fds > 1)
jsonw_end_object(json_wtr); /* prog object */
else if (i != nb_fds - 1 && nb_fds > 1)
printf("\n");
free(info_linear);
if (err)
break;
close(fds[i]);
@ -920,6 +996,7 @@ exit_close:
for (; i < nb_fds; i++)
close(fds[i]);
exit_free:
free(info_data);
free(fds);
return err;
}
@ -1409,8 +1486,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
while (argc) {
if (is_prefix(*argv, "type")) {
char *type;
NEXT_ARG();
if (common_prog_type != BPF_PROG_TYPE_UNSPEC) {
@ -1420,21 +1495,26 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
if (!REQ_ARGS(1))
goto err_free_reuse_maps;
/* Put a '/' at the end of type to appease libbpf */
type = malloc(strlen(*argv) + 2);
if (!type) {
p_err("mem alloc failed");
goto err_free_reuse_maps;
}
*type = 0;
strcat(type, *argv);
strcat(type, "/");
err = libbpf_prog_type_by_name(*argv, &common_prog_type,
&expected_attach_type);
if (err < 0) {
/* Put a '/' at the end of type to appease libbpf */
char *type = malloc(strlen(*argv) + 2);
err = get_prog_type_by_name(type, &common_prog_type,
&expected_attach_type);
free(type);
if (err < 0)
goto err_free_reuse_maps;
if (!type) {
p_err("mem alloc failed");
goto err_free_reuse_maps;
}
*type = 0;
strcat(type, *argv);
strcat(type, "/");
err = get_prog_type_by_name(type, &common_prog_type,
&expected_attach_type);
free(type);
if (err < 0)
goto err_free_reuse_maps;
}
NEXT_ARG();
} else if (is_prefix(*argv, "map")) {
@ -1657,6 +1737,11 @@ err_unpin:
else
bpf_object__unpin_programs(obj, pinfile);
err_close_obj:
if (!legacy_libbpf) {
p_info("Warning: bpftool is now running in libbpf strict mode and has more stringent requirements about BPF programs.\n"
"If it used to work for this object file but now doesn't, see --legacy option for more details.\n");
}
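	/* Illustrative opt-out for the warning above (object and pin paths
	 * made up):
	 *   bpftool --legacy prog load sample.o /sys/fs/bpf/sample
	 */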
bpf_object__close(obj);
err_free_reuse_maps:
for (i = 0; i < old_map_fds; i++)
@ -2016,41 +2101,58 @@ static void profile_print_readings(void)
static char *profile_target_name(int tgt_fd)
{
struct bpf_prog_info_linear *info_linear;
struct bpf_func_info *func_info;
struct bpf_func_info func_info;
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
const struct btf_type *t;
__u32 func_info_rec_size;
struct btf *btf = NULL;
char *name = NULL;
int err;
info_linear = bpf_program__get_prog_info_linear(
tgt_fd, 1UL << BPF_PROG_INFO_FUNC_INFO);
if (IS_ERR_OR_NULL(info_linear)) {
p_err("failed to get info_linear for prog FD %d", tgt_fd);
return NULL;
err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
if (err) {
p_err("failed to bpf_obj_get_info_by_fd for prog FD %d", tgt_fd);
goto out;
}
if (info_linear->info.btf_id == 0) {
if (info.btf_id == 0) {
p_err("prog FD %d doesn't have valid btf", tgt_fd);
goto out;
}
btf = btf__load_from_kernel_by_id(info_linear->info.btf_id);
func_info_rec_size = info.func_info_rec_size;
if (info.nr_func_info == 0) {
p_err("bpf_obj_get_info_by_fd for prog FD %d found 0 func_info", tgt_fd);
goto out;
}
memset(&info, 0, sizeof(info));
info.nr_func_info = 1;
info.func_info_rec_size = func_info_rec_size;
info.func_info = ptr_to_u64(&func_info);
err = bpf_obj_get_info_by_fd(tgt_fd, &info, &info_len);
if (err) {
p_err("failed to get func_info for prog FD %d", tgt_fd);
goto out;
}
btf = btf__load_from_kernel_by_id(info.btf_id);
if (libbpf_get_error(btf)) {
p_err("failed to load btf for prog FD %d", tgt_fd);
goto out;
}
func_info = u64_to_ptr(info_linear->info.func_info);
t = btf__type_by_id(btf, func_info[0].type_id);
t = btf__type_by_id(btf, func_info.type_id);
if (!t) {
p_err("btf %d doesn't have type %d",
info_linear->info.btf_id, func_info[0].type_id);
info.btf_id, func_info.type_id);
goto out;
}
name = strdup(btf__name_by_offset(btf, t->name_off));
out:
btf__free(btf);
free(info_linear);
return name;
}

View File

@ -32,7 +32,7 @@ static const struct btf *get_btf_vmlinux(void)
return btf_vmlinux;
btf_vmlinux = libbpf_find_kernel_btf();
if (IS_ERR(btf_vmlinux))
if (libbpf_get_error(btf_vmlinux))
p_err("struct_ops requires kernel CONFIG_DEBUG_INFO_BTF=y");
return btf_vmlinux;
@ -45,7 +45,7 @@ static const char *get_kern_struct_ops_name(const struct bpf_map_info *info)
const char *st_ops_name;
kern_btf = get_btf_vmlinux();
if (IS_ERR(kern_btf))
if (libbpf_get_error(kern_btf))
return "<btf_vmlinux_not_found>";
t = btf__type_by_id(kern_btf, info->btf_vmlinux_value_type_id);
@ -63,7 +63,7 @@ static __s32 get_map_info_type_id(void)
return map_info_type_id;
kern_btf = get_btf_vmlinux();
if (IS_ERR(kern_btf)) {
if (libbpf_get_error(kern_btf)) {
map_info_type_id = PTR_ERR(kern_btf);
return map_info_type_id;
}
@ -252,7 +252,7 @@ static struct res do_one_id(const char *id_str, work_func func, void *data,
}
fd = bpf_map_get_fd_by_id(id);
if (fd == -1) {
if (fd < 0) {
p_err("can't get map by id (%lu): %s", id, strerror(errno));
res.nr_errs++;
return res;
@ -415,7 +415,7 @@ static int do_dump(int argc, char **argv)
}
kern_btf = get_btf_vmlinux();
if (IS_ERR(kern_btf))
if (libbpf_get_error(kern_btf))
return -1;
if (!json_output) {
@ -495,7 +495,7 @@ static int do_register(int argc, char **argv)
file = GET_ARG();
obj = bpf_object__open(file);
if (IS_ERR_OR_NULL(obj))
if (libbpf_get_error(obj))
return -1;
set_max_rlimit();
@ -516,7 +516,7 @@ static int do_register(int argc, char **argv)
continue;
link = bpf_map__attach_struct_ops(map);
if (IS_ERR(link)) {
if (libbpf_get_error(link)) {
p_err("can't register struct_ops %s: %s",
bpf_map__name(map),
strerror(-PTR_ERR(link)));
@ -596,7 +596,7 @@ int do_struct_ops(int argc, char **argv)
err = cmd_select(cmds, argc, argv, do_help);
if (!IS_ERR(btf_vmlinux))
if (!libbpf_get_error(btf_vmlinux))
btf__free(btf_vmlinux);
return err;

View File

@ -123,7 +123,6 @@ int main(int argc, char **argv)
.parser = parse_arg,
.doc = argp_program_doc,
};
struct perf_buffer_opts pb_opts;
struct perf_buffer *pb = NULL;
struct runqslower_bpf *obj;
int err;
@ -165,9 +164,8 @@ int main(int argc, char **argv)
printf("Tracing run queue latency higher than %llu us\n", env.min_us);
printf("%-8s %-16s %-6s %14s\n", "TIME", "COMM", "PID", "LAT(us)");
pb_opts.sample_cb = handle_event;
pb_opts.lost_cb = handle_lost_events;
pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64, &pb_opts);
pb = perf_buffer__new(bpf_map__fd(obj->maps.events), 64,
handle_event, handle_lost_events, NULL, NULL);
err = libbpf_get_error(pb);
if (err) {
pb = NULL;

View File

@ -4938,6 +4938,25 @@ union bpf_attr {
* **-ENOENT** if symbol is not found.
*
* **-EPERM** if caller does not have permission to obtain kernel address.
*
* long bpf_find_vma(struct task_struct *task, u64 addr, void *callback_fn, void *callback_ctx, u64 flags)
* Description
* Find vma of *task* that contains *addr*, then call the *callback_fn*
* function with *task*, *vma*, and *callback_ctx*.
* The *callback_fn* should be a static function and
* the *callback_ctx* should be a pointer to the stack.
* *flags* is used to control certain aspects of the helper.
* Currently, *flags* must be 0.
*
* The expected callback signature is
*
* long (\*callback_fn)(struct task_struct \*task, struct vm_area_struct \*vma, void \*callback_ctx);
*
* Return
* 0 on success.
* **-ENOENT** if *task->mm* is NULL, or no vma contains *addr*.
* **-EBUSY** if *mmap_lock* could not be acquired (trylock failed).
* **-EINVAL** for invalid **flags**.
*/
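To make the calling convention concrete, a minimal sketch of a tracing program using the helper; the section name, struct cb_data, and use of the sampled instruction pointer are all illustrative, and vmlinux.h, bpf_helpers.h and bpf_tracing.h are assumed:
struct cb_data { bool file_backed; };
static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
		      void *callback_ctx)
{
	struct cb_data *data = callback_ctx;
	data->file_backed = vma->vm_file != NULL; /* e.g. note file-backed vmas */
	return 0;
}
SEC("perf_event")
int handle_pe(struct bpf_perf_event_data *ctx)
{
	struct task_struct *task = bpf_get_current_task_btf();
	struct cb_data data = {}; /* callback_ctx must point to the stack */
	bpf_find_vma(task, PT_REGS_IP(&ctx->regs), check_vma, &data, 0); /* flags must be 0 */
	return 0;
}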
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@ -5120,6 +5139,7 @@ union bpf_attr {
FN(trace_vprintk), \
FN(skc_to_unix_sock), \
FN(kallsyms_lookup_name), \
FN(find_vma), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
@ -6296,6 +6316,7 @@ struct bpf_sk_lookup {
__u32 local_ip4; /* Network byte order */
__u32 local_ip6[4]; /* Network byte order */
__u32 local_port; /* Host byte order */
__u32 ingress_ifindex; /* The arriving interface. Determined by inet_iif. */
};
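A sketch of how a BPF_SK_LOOKUP program might consult the new field (the ifindex value is illustrative; socket selection via bpf_sk_assign() is elided):
SEC("sk_lookup")
int steer_by_ifindex(struct bpf_sk_lookup *ctx)
{
	/* apply the programmable lookup only to packets that arrived on
	 * netdev 42; everything else falls through to the regular lookup
	 */
	if (ctx->ingress_ifindex != 42)
		return SK_PASS;
	/* ... look up a socket and bpf_sk_assign(ctx, sk, 0) here ... */
	return SK_PASS;
}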
/*

View File

@ -43,7 +43,7 @@ struct btf_type {
* "size" tells the size of the type it is describing.
*
* "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT,
* FUNC, FUNC_PROTO, VAR and DECL_TAG.
* FUNC, FUNC_PROTO, VAR, DECL_TAG and TYPE_TAG.
* "type" is a type_id referring to another type.
*/
union {
@ -75,6 +75,7 @@ enum {
BTF_KIND_DATASEC = 15, /* Section */
BTF_KIND_FLOAT = 16, /* Floating point */
BTF_KIND_DECL_TAG = 17, /* Decl Tag */
BTF_KIND_TYPE_TAG = 18, /* Type Tag */
NR_BTF_KINDS,
BTF_KIND_MAX = NR_BTF_KINDS - 1,

View File

@ -84,6 +84,7 @@ else
endif
# Append required CFLAGS
override CFLAGS += -std=gnu89
override CFLAGS += $(EXTRA_WARNINGS) -Wno-switch-enum
override CFLAGS += -Werror -Wall
override CFLAGS += $(INCLUDES)

View File

@ -28,6 +28,7 @@
#include <asm/unistd.h>
#include <errno.h>
#include <linux/bpf.h>
#include <limits.h>
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
@ -74,14 +75,15 @@ static inline int sys_bpf_fd(enum bpf_cmd cmd, union bpf_attr *attr,
return ensure_good_fd(fd);
}
static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size)
#define PROG_LOAD_ATTEMPTS 5
static inline int sys_bpf_prog_load(union bpf_attr *attr, unsigned int size, int attempts)
{
int retries = 5;
int fd;
do {
fd = sys_bpf_fd(BPF_PROG_LOAD, attr, size);
} while (fd < 0 && errno == EAGAIN && retries-- > 0);
} while (fd < 0 && errno == EAGAIN && --attempts > 0);
return fd;
}
@ -253,58 +255,91 @@ alloc_zero_tailing_info(const void *orecord, __u32 cnt,
return info;
}
int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
DEFAULT_VERSION(bpf_prog_load_v0_6_0, bpf_prog_load, LIBBPF_0.6.0)
int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
const char *prog_name, const char *license,
const struct bpf_insn *insns, size_t insn_cnt,
const struct bpf_prog_load_opts *opts)
{
void *finfo = NULL, *linfo = NULL;
const char *func_info, *line_info;
__u32 log_size, log_level, attach_prog_fd, attach_btf_obj_fd;
__u32 func_info_rec_size, line_info_rec_size;
int fd, attempts;
union bpf_attr attr;
int fd;
char *log_buf;
if (!load_attr->log_buf != !load_attr->log_buf_sz)
if (!OPTS_VALID(opts, bpf_prog_load_opts))
return libbpf_err(-EINVAL);
if (load_attr->log_level > (4 | 2 | 1) || (load_attr->log_level && !load_attr->log_buf))
attempts = OPTS_GET(opts, attempts, 0);
if (attempts < 0)
return libbpf_err(-EINVAL);
if (attempts == 0)
attempts = PROG_LOAD_ATTEMPTS;
memset(&attr, 0, sizeof(attr));
attr.prog_type = load_attr->prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
if (load_attr->attach_prog_fd)
attr.attach_prog_fd = load_attr->attach_prog_fd;
attr.prog_type = prog_type;
attr.expected_attach_type = OPTS_GET(opts, expected_attach_type, 0);
attr.prog_btf_fd = OPTS_GET(opts, prog_btf_fd, 0);
attr.prog_flags = OPTS_GET(opts, prog_flags, 0);
attr.prog_ifindex = OPTS_GET(opts, prog_ifindex, 0);
attr.kern_version = OPTS_GET(opts, kern_version, 0);
if (prog_name)
strncat(attr.prog_name, prog_name, sizeof(attr.prog_name) - 1);
attr.license = ptr_to_u64(license);
if (insn_cnt > UINT_MAX)
return libbpf_err(-E2BIG);
attr.insns = ptr_to_u64(insns);
attr.insn_cnt = (__u32)insn_cnt;
attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
attach_btf_obj_fd = OPTS_GET(opts, attach_btf_obj_fd, 0);
if (attach_prog_fd && attach_btf_obj_fd)
return libbpf_err(-EINVAL);
attr.attach_btf_id = OPTS_GET(opts, attach_btf_id, 0);
if (attach_prog_fd)
attr.attach_prog_fd = attach_prog_fd;
else
attr.attach_btf_obj_fd = load_attr->attach_btf_obj_fd;
attr.attach_btf_id = load_attr->attach_btf_id;
attr.attach_btf_obj_fd = attach_btf_obj_fd;
attr.prog_ifindex = load_attr->prog_ifindex;
attr.kern_version = load_attr->kern_version;
log_buf = OPTS_GET(opts, log_buf, NULL);
log_size = OPTS_GET(opts, log_size, 0);
log_level = OPTS_GET(opts, log_level, 0);
attr.insn_cnt = (__u32)load_attr->insn_cnt;
attr.insns = ptr_to_u64(load_attr->insns);
attr.license = ptr_to_u64(load_attr->license);
if (!!log_buf != !!log_size)
return libbpf_err(-EINVAL);
if (log_level > (4 | 2 | 1))
return libbpf_err(-EINVAL);
if (log_level && !log_buf)
return libbpf_err(-EINVAL);
attr.log_level = load_attr->log_level;
if (attr.log_level) {
attr.log_buf = ptr_to_u64(load_attr->log_buf);
attr.log_size = load_attr->log_buf_sz;
}
attr.log_level = log_level;
attr.log_buf = ptr_to_u64(log_buf);
attr.log_size = log_size;
attr.prog_btf_fd = load_attr->prog_btf_fd;
attr.prog_flags = load_attr->prog_flags;
func_info_rec_size = OPTS_GET(opts, func_info_rec_size, 0);
func_info = OPTS_GET(opts, func_info, NULL);
attr.func_info_rec_size = func_info_rec_size;
attr.func_info = ptr_to_u64(func_info);
attr.func_info_cnt = OPTS_GET(opts, func_info_cnt, 0);
attr.func_info_rec_size = load_attr->func_info_rec_size;
attr.func_info_cnt = load_attr->func_info_cnt;
attr.func_info = ptr_to_u64(load_attr->func_info);
line_info_rec_size = OPTS_GET(opts, line_info_rec_size, 0);
line_info = OPTS_GET(opts, line_info, NULL);
attr.line_info_rec_size = line_info_rec_size;
attr.line_info = ptr_to_u64(line_info);
attr.line_info_cnt = OPTS_GET(opts, line_info_cnt, 0);
attr.line_info_rec_size = load_attr->line_info_rec_size;
attr.line_info_cnt = load_attr->line_info_cnt;
attr.line_info = ptr_to_u64(load_attr->line_info);
attr.fd_array = ptr_to_u64(load_attr->fd_array);
attr.fd_array = ptr_to_u64(OPTS_GET(opts, fd_array, NULL));
if (load_attr->name)
memcpy(attr.prog_name, load_attr->name,
min(strlen(load_attr->name), (size_t)BPF_OBJ_NAME_LEN - 1));
fd = sys_bpf_prog_load(&attr, sizeof(attr));
fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
if (fd >= 0)
return fd;
@ -314,11 +349,11 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
*/
while (errno == E2BIG && (!finfo || !linfo)) {
if (!finfo && attr.func_info_cnt &&
attr.func_info_rec_size < load_attr->func_info_rec_size) {
attr.func_info_rec_size < func_info_rec_size) {
/* try with corrected func info records */
finfo = alloc_zero_tailing_info(load_attr->func_info,
load_attr->func_info_cnt,
load_attr->func_info_rec_size,
finfo = alloc_zero_tailing_info(func_info,
attr.func_info_cnt,
func_info_rec_size,
attr.func_info_rec_size);
if (!finfo) {
errno = E2BIG;
@ -326,13 +361,12 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
}
attr.func_info = ptr_to_u64(finfo);
attr.func_info_rec_size = load_attr->func_info_rec_size;
attr.func_info_rec_size = func_info_rec_size;
} else if (!linfo && attr.line_info_cnt &&
attr.line_info_rec_size <
load_attr->line_info_rec_size) {
linfo = alloc_zero_tailing_info(load_attr->line_info,
load_attr->line_info_cnt,
load_attr->line_info_rec_size,
attr.line_info_rec_size < line_info_rec_size) {
linfo = alloc_zero_tailing_info(line_info,
attr.line_info_cnt,
line_info_rec_size,
attr.line_info_rec_size);
if (!linfo) {
errno = E2BIG;
@ -340,26 +374,26 @@ int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr)
}
attr.line_info = ptr_to_u64(linfo);
attr.line_info_rec_size = load_attr->line_info_rec_size;
attr.line_info_rec_size = line_info_rec_size;
} else {
break;
}
fd = sys_bpf_prog_load(&attr, sizeof(attr));
fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
if (fd >= 0)
goto done;
}
if (load_attr->log_level || !load_attr->log_buf)
if (log_level || !log_buf)
goto done;
/* Try again with log */
attr.log_buf = ptr_to_u64(load_attr->log_buf);
attr.log_size = load_attr->log_buf_sz;
log_buf[0] = 0;
attr.log_buf = ptr_to_u64(log_buf);
attr.log_size = log_size;
attr.log_level = 1;
load_attr->log_buf[0] = 0;
fd = sys_bpf_prog_load(&attr, sizeof(attr));
fd = sys_bpf_prog_load(&attr, sizeof(attr), attempts);
done:
/* free() doesn't affect errno, so we don't need to restore it */
free(finfo);
@ -367,17 +401,20 @@ done:
return libbpf_err_errno(fd);
}
__attribute__((alias("bpf_load_program_xattr2")))
int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
char *log_buf, size_t log_buf_sz)
char *log_buf, size_t log_buf_sz);
static int bpf_load_program_xattr2(const struct bpf_load_program_attr *load_attr,
char *log_buf, size_t log_buf_sz)
{
struct bpf_prog_load_params p = {};
LIBBPF_OPTS(bpf_prog_load_opts, p);
if (!load_attr || !log_buf != !log_buf_sz)
return libbpf_err(-EINVAL);
p.prog_type = load_attr->prog_type;
p.expected_attach_type = load_attr->expected_attach_type;
switch (p.prog_type) {
switch (load_attr->prog_type) {
case BPF_PROG_TYPE_STRUCT_OPS:
case BPF_PROG_TYPE_LSM:
p.attach_btf_id = load_attr->attach_btf_id;
@ -391,12 +428,9 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
p.prog_ifindex = load_attr->prog_ifindex;
p.kern_version = load_attr->kern_version;
}
p.insn_cnt = load_attr->insns_cnt;
p.insns = load_attr->insns;
p.license = load_attr->license;
p.log_level = load_attr->log_level;
p.log_buf = log_buf;
p.log_buf_sz = log_buf_sz;
p.log_size = log_buf_sz;
p.prog_btf_fd = load_attr->prog_btf_fd;
p.func_info_rec_size = load_attr->func_info_rec_size;
p.func_info_cnt = load_attr->func_info_cnt;
@ -404,10 +438,10 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
p.line_info_rec_size = load_attr->line_info_rec_size;
p.line_info_cnt = load_attr->line_info_cnt;
p.line_info = load_attr->line_info;
p.name = load_attr->name;
p.prog_flags = load_attr->prog_flags;
return libbpf__bpf_prog_load(&p);
return bpf_prog_load(load_attr->prog_type, load_attr->name, load_attr->license,
load_attr->insns, load_attr->insns_cnt, &p);
}
int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
@ -426,7 +460,7 @@ int bpf_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
load_attr.license = license;
load_attr.kern_version = kern_version;
return bpf_load_program_xattr(&load_attr, log_buf, log_buf_sz);
return bpf_load_program_xattr2(&load_attr, log_buf, log_buf_sz);
}
int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
@ -449,7 +483,7 @@ int bpf_verify_program(enum bpf_prog_type type, const struct bpf_insn *insns,
attr.kern_version = kern_version;
attr.prog_flags = prog_flags;
fd = sys_bpf_prog_load(&attr, sizeof(attr));
fd = sys_bpf_prog_load(&attr, sizeof(attr), PROG_LOAD_ATTEMPTS);
return libbpf_err_errno(fd);
}

View File

@ -29,6 +29,7 @@
#include <stdint.h>
#include "libbpf_common.h"
#include "libbpf_legacy.h"
#ifdef __cplusplus
extern "C" {
@ -71,6 +72,71 @@ LIBBPF_API int bpf_create_map_in_map(enum bpf_map_type map_type,
int inner_map_fd, int max_entries,
__u32 map_flags);
struct bpf_prog_load_opts {
size_t sz; /* size of this struct for forward/backward compatibility */
/* libbpf can retry BPF_PROG_LOAD command if bpf() syscall returns
* -EAGAIN. This field determines how many attempts libbpf has to
* make. If not specified, libbpf uses the default value of 5.
*/
int attempts;
enum bpf_attach_type expected_attach_type;
__u32 prog_btf_fd;
__u32 prog_flags;
__u32 prog_ifindex;
__u32 kern_version;
__u32 attach_btf_id;
__u32 attach_prog_fd;
__u32 attach_btf_obj_fd;
const int *fd_array;
/* .BTF.ext func info data */
const void *func_info;
__u32 func_info_cnt;
__u32 func_info_rec_size;
/* .BTF.ext line info data */
const void *line_info;
__u32 line_info_cnt;
__u32 line_info_rec_size;
/* verifier log options */
__u32 log_level;
__u32 log_size;
char *log_buf;
};
#define bpf_prog_load_opts__last_field log_buf
LIBBPF_API int bpf_prog_load(enum bpf_prog_type prog_type,
const char *prog_name, const char *license,
const struct bpf_insn *insns, size_t insn_cnt,
const struct bpf_prog_load_opts *opts);
/* this "specialization" should go away in libbpf 1.0 */
LIBBPF_API int bpf_prog_load_v0_6_0(enum bpf_prog_type prog_type,
const char *prog_name, const char *license,
const struct bpf_insn *insns, size_t insn_cnt,
const struct bpf_prog_load_opts *opts);
/* This is an elaborate way to not conflict with deprecated bpf_prog_load()
* API, defined in libbpf.h. Once we hit libbpf 1.0, all this will be gone.
* With this approach, if someone is calling bpf_prog_load() with
* 4 arguments, they will use the deprecated API, which keeps backwards
* compatibility (both source code and binary). If bpf_prog_load() is called
* with 6 arguments, though, it gets redirected to __bpf_prog_load.
* So, looking forward to libbpf 1.0, this hack will be gone and
* __bpf_prog_load() will simply be called bpf_prog_load().
*/
#ifndef bpf_prog_load
#define bpf_prog_load(...) ___libbpf_overload(___bpf_prog_load, __VA_ARGS__)
#define ___bpf_prog_load4(file, type, pobj, prog_fd) \
bpf_prog_load_deprecated(file, type, pobj, prog_fd)
#define ___bpf_prog_load6(prog_type, prog_name, license, insns, insn_cnt, opts) \
bpf_prog_load(prog_type, prog_name, license, insns, insn_cnt, opts)
#endif /* bpf_prog_load */
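For illustration, a 6-argument call picks the new OPTS-based variant; insns, insn_cnt and the char array log_buf are assumed to be prepared by the caller:
LIBBPF_OPTS(bpf_prog_load_opts, opts,
	.log_buf = log_buf,
	.log_size = sizeof(log_buf),
	.log_level = 1,
	.attempts = 10,		/* override the default 5 EAGAIN retries */
);
int fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "my_prog", "GPL",
		       insns, insn_cnt, &opts);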
struct bpf_load_program_attr {
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
@ -102,13 +168,15 @@ struct bpf_load_program_attr {
/* Recommend log buffer size */
#define BPF_LOG_BUF_SIZE (UINT32_MAX >> 8) /* verifier maximum in kernels <= 5.1 */
LIBBPF_API int
bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
char *log_buf, size_t log_buf_sz);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
LIBBPF_API int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
char *log_buf, size_t log_buf_sz);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
LIBBPF_API int bpf_load_program(enum bpf_prog_type type,
const struct bpf_insn *insns, size_t insns_cnt,
const char *license, __u32 kern_version,
char *log_buf, size_t log_buf_sz);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_prog_load() instead")
LIBBPF_API int bpf_verify_program(enum bpf_prog_type type,
const struct bpf_insn *insns,
size_t insns_cnt, __u32 prog_flags,

View File

@ -3,6 +3,8 @@
#ifndef __BPF_GEN_INTERNAL_H
#define __BPF_GEN_INTERNAL_H
#include "bpf.h"
struct ksym_relo_desc {
const char *name;
int kind;
@ -50,8 +52,10 @@ int bpf_gen__finish(struct bpf_gen *gen);
void bpf_gen__free(struct bpf_gen *gen);
void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size);
void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx);
struct bpf_prog_load_params;
void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx);
void bpf_gen__prog_load(struct bpf_gen *gen,
enum bpf_prog_type prog_type, const char *prog_name,
const char *license, struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *load_attr, int prog_idx);
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx);
void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *name, enum bpf_attach_type type);

View File

@ -299,6 +299,7 @@ static int btf_type_size(const struct btf_type *t)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_FLOAT:
case BTF_KIND_TYPE_TAG:
return base_size;
case BTF_KIND_INT:
return base_size + sizeof(__u32);
@ -349,6 +350,7 @@ static int btf_bswap_type_rest(struct btf_type *t)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_FLOAT:
case BTF_KIND_TYPE_TAG:
return 0;
case BTF_KIND_INT:
*(__u32 *)(t + 1) = bswap_32(*(__u32 *)(t + 1));
@ -649,6 +651,7 @@ int btf__align_of(const struct btf *btf, __u32 id)
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPE_TAG:
return btf__align_of(btf, t->type);
case BTF_KIND_ARRAY:
return btf__align_of(btf, btf_array(t)->type);
@ -2235,6 +2238,22 @@ int btf__add_restrict(struct btf *btf, int ref_type_id)
return btf_add_ref_kind(btf, BTF_KIND_RESTRICT, NULL, ref_type_id);
}
/*
* Append new BTF_KIND_TYPE_TAG type with:
* - *value*, non-empty/non-NULL tag value;
* - *ref_type_id* - referenced type ID, it might not exist yet;
* Returns:
* - >0, type ID of newly added BTF type;
* - <0, on error.
*/
int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id)
{
if (!value || !value[0])
return libbpf_err(-EINVAL);
return btf_add_ref_kind(btf, BTF_KIND_TYPE_TAG, value, ref_type_id);
}
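A quick sketch of chaining the new kind into a type graph, e.g. to encode int __attribute__((btf_type_tag("user"))) * (return-value checks elided):
int int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] */
int tag_id = btf__add_type_tag(btf, "user", int_id);      /* [2] tag -> [1] */
int ptr_id = btf__add_ptr(btf, tag_id);                   /* [3] ptr -> [2] */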
/*
* Append new BTF_KIND_FUNC type with:
* - *name*, non-empty/non-NULL name;
@ -2846,8 +2865,7 @@ __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
struct btf_dedup;
static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
const struct btf_dedup_opts *opts);
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts);
static void btf_dedup_free(struct btf_dedup *d);
static int btf_dedup_prep(struct btf_dedup *d);
static int btf_dedup_strings(struct btf_dedup *d);
@ -2994,12 +3012,17 @@ static int btf_dedup_remap_types(struct btf_dedup *d);
* deduplicating structs/unions is described in greater details in comments for
* `btf_dedup_is_equiv` function.
*/
int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
const struct btf_dedup_opts *opts)
DEFAULT_VERSION(btf__dedup_v0_6_0, btf__dedup, LIBBPF_0.6.0)
int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts)
{
struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
struct btf_dedup *d;
int err;
if (!OPTS_VALID(opts, btf_dedup_opts))
return libbpf_err(-EINVAL);
d = btf_dedup_new(btf, opts);
if (IS_ERR(d)) {
pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
return libbpf_err(-EINVAL);
@ -3051,6 +3074,19 @@ done:
return libbpf_err(err);
}
COMPAT_VERSION(bpf__dedup_deprecated, btf__dedup, LIBBPF_0.0.2)
int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *unused_opts)
{
LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext);
if (unused_opts) {
pr_warn("please use new version of btf__dedup() that supports options\n");
return libbpf_err(-ENOTSUP);
}
return btf__dedup(btf, &opts);
}
#define BTF_UNPROCESSED_ID ((__u32)-1)
#define BTF_IN_PROGRESS_ID ((__u32)-2)
@ -3163,8 +3199,7 @@ static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
return k1 == k2;
}
static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
const struct btf_dedup_opts *opts)
static struct btf_dedup *btf_dedup_new(struct btf *btf, const struct btf_dedup_opts *opts)
{
struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
@ -3173,13 +3208,11 @@ static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
if (!d)
return ERR_PTR(-ENOMEM);
d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
/* dedup_table_size is now used only to force collisions in tests */
if (opts && opts->dedup_table_size == 1)
if (OPTS_GET(opts, force_collisions, false))
hash_fn = btf_dedup_collision_hash_fn;
d->btf = btf;
d->btf_ext = btf_ext;
d->btf_ext = OPTS_GET(opts, btf_ext, NULL);
d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
if (IS_ERR(d->dedup_table)) {
@ -3625,6 +3658,7 @@ static int btf_dedup_prep(struct btf_dedup *d)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_FLOAT:
case BTF_KIND_TYPE_TAG:
h = btf_hash_common(t);
break;
case BTF_KIND_INT:
@ -3685,6 +3719,7 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_VAR:
case BTF_KIND_DATASEC:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
return 0;
case BTF_KIND_INT:
@ -3708,8 +3743,6 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
new_id = cand_id;
break;
}
if (d->opts.dont_resolve_fwds)
continue;
if (btf_compat_enum(t, cand)) {
if (btf_is_enum_fwd(t)) {
/* resolve fwd to full enum */
@ -3952,8 +3985,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return 0;
/* FWD <--> STRUCT/UNION equivalence check, if enabled */
if (!d->opts.dont_resolve_fwds
&& (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
if ((cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
&& cand_kind != canon_kind) {
__u16 real_kind;
__u16 fwd_kind;
@ -3979,10 +4011,7 @@ static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
return btf_equal_int_tag(cand_type, canon_type);
case BTF_KIND_ENUM:
if (d->opts.dont_resolve_fwds)
return btf_equal_enum(cand_type, canon_type);
else
return btf_compat_enum(cand_type, canon_type);
return btf_compat_enum(cand_type, canon_type);
case BTF_KIND_FWD:
case BTF_KIND_FLOAT:
@ -4289,6 +4318,7 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
case BTF_KIND_PTR:
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_TYPE_TAG:
ref_type_id = btf_dedup_ref_type(d, t->type);
if (ref_type_id < 0)
return ref_type_id;
@ -4595,6 +4625,7 @@ int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ct
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
return visit(&t->type, ctx);
case BTF_KIND_ARRAY: {

View File

@ -227,6 +227,7 @@ LIBBPF_API int btf__add_typedef(struct btf *btf, const char *name, int ref_type_
LIBBPF_API int btf__add_volatile(struct btf *btf, int ref_type_id);
LIBBPF_API int btf__add_const(struct btf *btf, int ref_type_id);
LIBBPF_API int btf__add_restrict(struct btf *btf, int ref_type_id);
LIBBPF_API int btf__add_type_tag(struct btf *btf, const char *value, int ref_type_id);
/* func and func_proto construction APIs */
LIBBPF_API int btf__add_func(struct btf *btf, const char *name,
@ -245,25 +246,80 @@ LIBBPF_API int btf__add_decl_tag(struct btf *btf, const char *value, int ref_typ
int component_idx);
struct btf_dedup_opts {
unsigned int dedup_table_size;
bool dont_resolve_fwds;
size_t sz;
/* optional .BTF.ext info to dedup along the main BTF info */
struct btf_ext *btf_ext;
/* force hash collisions (used for testing) */
bool force_collisions;
size_t :0;
};
#define btf_dedup_opts__last_field force_collisions
LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
const struct btf_dedup_opts *opts);
LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
LIBBPF_API int btf__dedup_v0_6_0(struct btf *btf, const struct btf_dedup_opts *opts);
LIBBPF_DEPRECATED_SINCE(0, 7, "use btf__dedup() instead")
LIBBPF_API int btf__dedup_deprecated(struct btf *btf, struct btf_ext *btf_ext, const void *opts);
#define btf__dedup(...) ___libbpf_overload(___btf_dedup, __VA_ARGS__)
#define ___btf_dedup3(btf, btf_ext, opts) btf__dedup_deprecated(btf, btf_ext, opts)
#define ___btf_dedup2(btf, opts) btf__dedup(btf, opts)
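With the overload in place, existing 3-argument callers keep compiling, while new code can write, for instance (btf and btf_ext assumed already loaded):
LIBBPF_OPTS(btf_dedup_opts, opts, .btf_ext = btf_ext); /* .btf_ext may be NULL */
err = btf__dedup(btf, &opts);
/* or simply btf__dedup(btf, NULL) for default options */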
struct btf_dump;
struct btf_dump_opts {
void *ctx;
union {
size_t sz;
void *ctx; /* DEPRECATED: will be gone in v1.0 */
};
};
typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args);
LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf,
const struct btf_ext *btf_ext,
const struct btf_dump_opts *opts,
btf_dump_printf_fn_t printf_fn);
btf_dump_printf_fn_t printf_fn,
void *ctx,
const struct btf_dump_opts *opts);
LIBBPF_API struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf,
btf_dump_printf_fn_t printf_fn,
void *ctx,
const struct btf_dump_opts *opts);
LIBBPF_API struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
const struct btf_ext *btf_ext,
const struct btf_dump_opts *opts,
btf_dump_printf_fn_t printf_fn);
/* Choose either btf_dump__new() or btf_dump__new_deprecated() based on the
* type of the 4th argument. If it's btf_dump's print callback, use the deprecated
* API; otherwise, choose the new btf_dump__new(). ___libbpf_override()
* doesn't work here because both variants have 4 input arguments.
*
* (void *) casts are necessary to avoid compilation warnings about type
* mismatches, because even though __builtin_choose_expr() only ever evaluates
* one side, the other side still has to satisfy type constraints (this is a
* compiler implementation limitation which might be lifted eventually,
* according to the documentation). So passing struct btf_ext in place of
* btf_dump_printf_fn_t would generate a compilation warning. Casting to
* void * avoids this issue.
*
* Also, two type compatibility checks for a function and function pointer are
* required because passing a function reference into btf_dump__new() as
* btf_dump__new(..., my_callback, ...) and as btf_dump__new(...,
* &my_callback, ...) (note the explicit ampersand in the latter case) actually
* differs as far as __builtin_types_compatible_p() is concerned. Thus two
* checks are combined to detect the callback argument.
*
* The rest works just like in case of ___libbpf_override() usage with symbol
* versioning.
*/
#define btf_dump__new(a1, a2, a3, a4) __builtin_choose_expr( \
__builtin_types_compatible_p(typeof(a4), btf_dump_printf_fn_t) || \
__builtin_types_compatible_p(typeof(a4), void(void *, const char *, va_list)), \
btf_dump__new_deprecated((void *)a1, (void *)a2, (void *)a3, (void *)a4), \
btf_dump__new((void *)a1, (void *)a2, (void *)a3, (void *)a4))
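A usage sketch of the new argument order; because the 4th argument below is an opts pointer (NULL) rather than the print callback, the macro resolves to the new API:
static void print_to_file(void *ctx, const char *fmt, va_list args)
{
	vfprintf(ctx, fmt, args); /* ctx carries the FILE * passed below */
}
struct btf_dump *d = btf_dump__new(btf, print_to_file, stdout, NULL);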
LIBBPF_API void btf_dump__free(struct btf_dump *d);
LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
@ -403,7 +459,8 @@ static inline bool btf_is_mod(const struct btf_type *t)
return kind == BTF_KIND_VOLATILE ||
kind == BTF_KIND_CONST ||
kind == BTF_KIND_RESTRICT;
kind == BTF_KIND_RESTRICT ||
kind == BTF_KIND_TYPE_TAG;
}
static inline bool btf_is_func(const struct btf_type *t)
@ -436,6 +493,11 @@ static inline bool btf_is_decl_tag(const struct btf_type *t)
return btf_kind(t) == BTF_KIND_DECL_TAG;
}
static inline bool btf_is_type_tag(const struct btf_type *t)
{
return btf_kind(t) == BTF_KIND_TYPE_TAG;
}
static inline __u8 btf_int_encoding(const struct btf_type *t)
{
return BTF_INT_ENCODING(*(__u32 *)(t + 1));

View File

@ -77,9 +77,8 @@ struct btf_dump_data {
struct btf_dump {
const struct btf *btf;
const struct btf_ext *btf_ext;
btf_dump_printf_fn_t printf_fn;
struct btf_dump_opts opts;
void *cb_ctx;
int ptr_sz;
bool strip_mods;
bool skip_anon_defs;
@ -138,29 +137,32 @@ static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
va_list args;
va_start(args, fmt);
d->printf_fn(d->opts.ctx, fmt, args);
d->printf_fn(d->cb_ctx, fmt, args);
va_end(args);
}
static int btf_dump_mark_referenced(struct btf_dump *d);
static int btf_dump_resize(struct btf_dump *d);
struct btf_dump *btf_dump__new(const struct btf *btf,
const struct btf_ext *btf_ext,
const struct btf_dump_opts *opts,
btf_dump_printf_fn_t printf_fn)
DEFAULT_VERSION(btf_dump__new_v0_6_0, btf_dump__new, LIBBPF_0.6.0)
struct btf_dump *btf_dump__new_v0_6_0(const struct btf *btf,
btf_dump_printf_fn_t printf_fn,
void *ctx,
const struct btf_dump_opts *opts)
{
struct btf_dump *d;
int err;
if (!printf_fn)
return libbpf_err_ptr(-EINVAL);
d = calloc(1, sizeof(struct btf_dump));
if (!d)
return libbpf_err_ptr(-ENOMEM);
d->btf = btf;
d->btf_ext = btf_ext;
d->printf_fn = printf_fn;
d->opts.ctx = opts ? opts->ctx : NULL;
d->cb_ctx = ctx;
d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
@ -186,6 +188,17 @@ err:
return libbpf_err_ptr(err);
}
COMPAT_VERSION(btf_dump__new_deprecated, btf_dump__new, LIBBPF_0.0.4)
struct btf_dump *btf_dump__new_deprecated(const struct btf *btf,
const struct btf_ext *btf_ext,
const struct btf_dump_opts *opts,
btf_dump_printf_fn_t printf_fn)
{
if (!printf_fn)
return libbpf_err_ptr(-EINVAL);
return btf_dump__new_v0_6_0(btf, printf_fn, opts ? opts->ctx : NULL, opts);
}
static int btf_dump_resize(struct btf_dump *d)
{
int err, last_id = btf__type_cnt(d->btf) - 1;
@ -317,6 +330,7 @@ static int btf_dump_mark_referenced(struct btf_dump *d)
case BTF_KIND_FUNC:
case BTF_KIND_VAR:
case BTF_KIND_DECL_TAG:
case BTF_KIND_TYPE_TAG:
d->type_states[t->type].referenced = 1;
break;
@ -560,6 +574,7 @@ static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPE_TAG:
return btf_dump_order_type(d, t->type, through_ptr);
case BTF_KIND_FUNC_PROTO: {
@ -734,6 +749,7 @@ static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
case BTF_KIND_VOLATILE:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPE_TAG:
btf_dump_emit_type(d, t->type, cont_id);
break;
case BTF_KIND_ARRAY:
@ -1154,6 +1170,7 @@ skip_mod:
case BTF_KIND_CONST:
case BTF_KIND_RESTRICT:
case BTF_KIND_FUNC_PROTO:
case BTF_KIND_TYPE_TAG:
id = t->type;
break;
case BTF_KIND_ARRAY:
@ -1322,6 +1339,11 @@ static void btf_dump_emit_type_chain(struct btf_dump *d,
case BTF_KIND_RESTRICT:
btf_dump_printf(d, " restrict");
break;
case BTF_KIND_TYPE_TAG:
btf_dump_emit_mods(d, decls);
name = btf_name_of(d, t->name_off);
btf_dump_printf(d, " __attribute__((btf_type_tag(\"%s\")))", name);
break;
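		/* For reference, an illustrative round trip: a source-level
		 * annotation such as
		 *   #define __user __attribute__((btf_type_tag("user")))
		 *   int foo(int __user *arg);
		 * is now printed back by btf_dump as
		 *   int foo(int __attribute__((btf_type_tag("user"))) *arg);
		 */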
case BTF_KIND_ARRAY: {
const struct btf_array *a = btf_array(t);
const struct btf_type *next_t;

View File

@ -584,8 +584,9 @@ void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
struct ksym_desc *kdesc;
int i;
for (int i = 0; i < gen->nr_ksyms; i++) {
for (i = 0; i < gen->nr_ksyms; i++) {
if (!strcmp(gen->ksyms[i].name, relo->name)) {
gen->ksyms[i].ref++;
return &gen->ksyms[i];
@ -900,27 +901,27 @@ static void cleanup_relos(struct bpf_gen *gen, int insns)
}
void bpf_gen__prog_load(struct bpf_gen *gen,
struct bpf_prog_load_params *load_attr, int prog_idx)
enum bpf_prog_type prog_type, const char *prog_name,
const char *license, struct bpf_insn *insns, size_t insn_cnt,
struct bpf_prog_load_opts *load_attr, int prog_idx)
{
int attr_size = offsetofend(union bpf_attr, fd_array);
int prog_load_attr, license, insns, func_info, line_info;
int prog_load_attr, license_off, insns_off, func_info, line_info;
union bpf_attr attr;
memset(&attr, 0, attr_size);
pr_debug("gen: prog_load: type %d insns_cnt %zd\n",
load_attr->prog_type, load_attr->insn_cnt);
pr_debug("gen: prog_load: type %d insns_cnt %zd\n", prog_type, insn_cnt);
/* add license string to blob of bytes */
license = add_data(gen, load_attr->license, strlen(load_attr->license) + 1);
license_off = add_data(gen, license, strlen(license) + 1);
/* add insns to blob of bytes */
insns = add_data(gen, load_attr->insns,
load_attr->insn_cnt * sizeof(struct bpf_insn));
insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));
attr.prog_type = load_attr->prog_type;
attr.prog_type = prog_type;
attr.expected_attach_type = load_attr->expected_attach_type;
attr.attach_btf_id = load_attr->attach_btf_id;
attr.prog_ifindex = load_attr->prog_ifindex;
attr.kern_version = 0;
attr.insn_cnt = (__u32)load_attr->insn_cnt;
attr.insn_cnt = (__u32)insn_cnt;
attr.prog_flags = load_attr->prog_flags;
attr.func_info_rec_size = load_attr->func_info_rec_size;
@ -933,15 +934,15 @@ void bpf_gen__prog_load(struct bpf_gen *gen,
line_info = add_data(gen, load_attr->line_info,
attr.line_info_cnt * attr.line_info_rec_size);
memcpy(attr.prog_name, load_attr->name,
min((unsigned)strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
memcpy(attr.prog_name, prog_name,
min((unsigned)strlen(prog_name), BPF_OBJ_NAME_LEN - 1));
prog_load_attr = add_data(gen, &attr, attr_size);
/* populate union bpf_attr with a pointer to license */
emit_rel_store(gen, attr_field(prog_load_attr, license), license);
emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);
/* populate union bpf_attr with a pointer to instructions */
emit_rel_store(gen, attr_field(prog_load_attr, insns), insns);
emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off);
/* populate union bpf_attr with a pointer to func_info */
emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);
@ -973,12 +974,12 @@ void bpf_gen__prog_load(struct bpf_gen *gen,
emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
offsetof(union bpf_attr, attach_btf_obj_fd)));
}
emit_relos(gen, insns);
emit_relos(gen, insns_off);
/* emit PROG_LOAD command */
emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
cleanup_relos(gen, insns);
cleanup_relos(gen, insns_off);
if (gen->attach_kind)
emit_sys_close_blob(gen,
attr_field(prog_load_attr, attach_btf_obj_fd));

View File

@ -197,6 +197,8 @@ enum kern_feature_id {
FEAT_PERF_LINK,
/* BTF_KIND_DECL_TAG support */
FEAT_BTF_DECL_TAG,
/* BTF_KIND_TYPE_TAG support */
FEAT_BTF_TYPE_TAG,
__FEAT_CNT,
};
@ -221,7 +223,7 @@ struct reloc_desc {
struct bpf_sec_def;
typedef int (*init_fn_t)(struct bpf_program *prog, long cookie);
typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_params *attr, long cookie);
typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_opts *opts, long cookie);
typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog, long cookie);
/* stored as sec_def->cookie for all libbpf-supported SEC()s */
@ -2076,6 +2078,7 @@ static const char *__btf_kind_str(__u16 kind)
case BTF_KIND_DATASEC: return "datasec";
case BTF_KIND_FLOAT: return "float";
case BTF_KIND_DECL_TAG: return "decl_tag";
case BTF_KIND_TYPE_TAG: return "type_tag";
default: return "unknown";
}
}
@ -2588,8 +2591,10 @@ static bool btf_needs_sanitization(struct bpf_object *obj)
bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
return !has_func || !has_datasec || !has_func_global || !has_float || !has_decl_tag;
return !has_func || !has_datasec || !has_func_global || !has_float ||
!has_decl_tag || !has_type_tag;
}
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
@ -2599,6 +2604,7 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
struct btf_type *t;
int i, j, vlen;
@ -2657,6 +2663,10 @@ static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
*/
t->name_off = 0;
t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
} else if (!has_type_tag && btf_is_type_tag(t)) {
/* replace TYPE_TAG with a CONST */
t->name_off = 0;
t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
}
}
}
@ -2752,13 +2762,12 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
t_var = btf__type_by_id(btf, vsi->type);
var = btf_var(t_var);
if (!btf_is_var(t_var)) {
if (!t_var || !btf_is_var(t_var)) {
pr_debug("Non-VAR type seen in section %s\n", name);
return -EINVAL;
}
var = btf_var(t_var);
if (var->linkage == BTF_VAR_STATIC)
continue;
@ -3191,11 +3200,11 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
Elf_Scn *scn;
Elf64_Shdr *sh;
/* ELF section indices are 1-based, so allocate +1 element to keep
* indexing simple. Also include 0th invalid section into sec_cnt for
* simpler and more traditional iteration logic.
/* ELF section indices are 0-based, but sec #0 is special "invalid"
* section. e_shnum does include sec #0, so e_shnum is the necessary
* size of an array to keep all the sections.
*/
obj->efile.sec_cnt = 1 + obj->efile.ehdr->e_shnum;
obj->efile.sec_cnt = obj->efile.ehdr->e_shnum;
obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
if (!obj->efile.secs)
return -ENOMEM;
@ -3271,8 +3280,12 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
obj->efile.btf_maps_shndx = idx;
} else if (strcmp(name, BTF_ELF_SEC) == 0) {
if (sh->sh_type != SHT_PROGBITS)
return -LIBBPF_ERRNO__FORMAT;
btf_data = data;
} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
if (sh->sh_type != SHT_PROGBITS)
return -LIBBPF_ERRNO__FORMAT;
btf_ext_data = data;
} else if (sh->sh_type == SHT_SYMTAB) {
/* already processed during the first pass above */
@ -3303,6 +3316,10 @@ static int bpf_object__elf_collect(struct bpf_object *obj)
} else if (sh->sh_type == SHT_REL) {
int targ_sec_idx = sh->sh_info; /* points to other section */
if (sh->sh_entsize != sizeof(Elf64_Rel) ||
targ_sec_idx >= obj->efile.sec_cnt)
return -LIBBPF_ERRNO__FORMAT;
/* Only do relo for section with exec instructions */
if (!section_have_execinstr(obj, targ_sec_idx) &&
strcmp(name, ".rel" STRUCT_OPS_SEC) &&
@ -3555,7 +3572,7 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
sh = elf_sec_hdr(obj, scn);
if (!sh)
if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
return -LIBBPF_ERRNO__FORMAT;
dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
@ -4022,7 +4039,7 @@ static int
bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
{
const char *relo_sec_name, *sec_name;
size_t sec_idx = shdr->sh_info;
size_t sec_idx = shdr->sh_info, sym_idx;
struct bpf_program *prog;
struct reloc_desc *relos;
int err, i, nrels;
@ -4033,6 +4050,9 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat
Elf64_Sym *sym;
Elf64_Rel *rel;
if (sec_idx >= obj->efile.sec_cnt)
return -EINVAL;
scn = elf_sec_by_idx(obj, sec_idx);
scn_data = elf_sec_data(obj, scn);
@ -4052,16 +4072,23 @@ bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Dat
return -LIBBPF_ERRNO__FORMAT;
}
sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
sym_idx = ELF64_R_SYM(rel->r_info);
sym = elf_sym_by_idx(obj, sym_idx);
if (!sym) {
pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i);
pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
relo_sec_name, sym_idx, i);
return -LIBBPF_ERRNO__FORMAT;
}
if (sym->st_shndx >= obj->efile.sec_cnt) {
pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
return -LIBBPF_ERRNO__FORMAT;
}
if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i);
relo_sec_name, (size_t)rel->r_offset, i);
return -LIBBPF_ERRNO__FORMAT;
}
@ -4265,30 +4292,20 @@ int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
static int
bpf_object__probe_loading(struct bpf_object *obj)
{
struct bpf_load_program_attr attr;
char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
int ret;
int ret, insn_cnt = ARRAY_SIZE(insns);
if (obj->gen_loader)
return 0;
/* make sure basic loading works */
memset(&attr, 0, sizeof(attr));
attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
attr.insns = insns;
attr.insns_cnt = ARRAY_SIZE(insns);
attr.license = "GPL";
ret = bpf_load_program_xattr(&attr, NULL, 0);
if (ret < 0) {
attr.prog_type = BPF_PROG_TYPE_TRACEPOINT;
ret = bpf_load_program_xattr(&attr, NULL, 0);
}
ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
if (ret < 0)
ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
if (ret < 0) {
ret = errno;
cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
@ -4312,28 +4329,19 @@ static int probe_fd(int fd)
static int probe_kern_prog_name(void)
{
struct bpf_load_program_attr attr;
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
int ret;
int ret, insn_cnt = ARRAY_SIZE(insns);
/* make sure loading with name works */
memset(&attr, 0, sizeof(attr));
attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
attr.insns = insns;
attr.insns_cnt = ARRAY_SIZE(insns);
attr.license = "GPL";
attr.name = "test";
ret = bpf_load_program_xattr(&attr, NULL, 0);
ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "test", "GPL", insns, insn_cnt, NULL);
return probe_fd(ret);
}
static int probe_kern_global_data(void)
{
struct bpf_load_program_attr prg_attr;
struct bpf_create_map_attr map_attr;
char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
@ -4342,7 +4350,7 @@ static int probe_kern_global_data(void)
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
int ret, map;
int ret, map, insn_cnt = ARRAY_SIZE(insns);
memset(&map_attr, 0, sizeof(map_attr));
map_attr.map_type = BPF_MAP_TYPE_ARRAY;
@ -4361,13 +4369,7 @@ static int probe_kern_global_data(void)
insns[0].imm = map;
memset(&prg_attr, 0, sizeof(prg_attr));
prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
prg_attr.insns = insns;
prg_attr.insns_cnt = ARRAY_SIZE(insns);
prg_attr.license = "GPL";
ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
close(map);
return probe_fd(ret);
}
@ -4468,6 +4470,22 @@ static int probe_kern_btf_decl_tag(void)
strs, sizeof(strs)));
}
static int probe_kern_btf_type_tag(void)
{
static const char strs[] = "\0tag";
__u32 types[] = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
/* attr */
BTF_TYPE_TYPE_TAG_ENC(1, 1), /* [2] */
/* ptr */
BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_PTR, 0, 0), 2), /* [3] */
};
return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
strs, sizeof(strs)));
}
static int probe_kern_array_mmap(void)
{
struct bpf_create_map_attr attr = {
@ -4483,30 +4501,24 @@ static int probe_kern_array_mmap(void)
static int probe_kern_exp_attach_type(void)
{
struct bpf_load_program_attr attr;
LIBBPF_OPTS(bpf_prog_load_opts, opts, .expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE);
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
int fd, insn_cnt = ARRAY_SIZE(insns);
memset(&attr, 0, sizeof(attr));
/* use any valid combination of program type and (optional)
* non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS)
* to see if kernel supports expected_attach_type field for
* BPF_PROG_LOAD command
*/
attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
attr.insns = insns;
attr.insns_cnt = ARRAY_SIZE(insns);
attr.license = "GPL";
return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
fd = bpf_prog_load(BPF_PROG_TYPE_CGROUP_SOCK, NULL, "GPL", insns, insn_cnt, &opts);
return probe_fd(fd);
}
static int probe_kern_probe_read_kernel(void)
{
struct bpf_load_program_attr attr;
struct bpf_insn insns[] = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
@ -4515,26 +4527,21 @@ static int probe_kern_probe_read_kernel(void)
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
BPF_EXIT_INSN(),
};
int fd, insn_cnt = ARRAY_SIZE(insns);
memset(&attr, 0, sizeof(attr));
attr.prog_type = BPF_PROG_TYPE_KPROBE;
attr.insns = insns;
attr.insns_cnt = ARRAY_SIZE(insns);
attr.license = "GPL";
return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
return probe_fd(fd);
}
static int probe_prog_bind_map(void)
{
struct bpf_load_program_attr prg_attr;
struct bpf_create_map_attr map_attr;
char *cp, errmsg[STRERR_BUFSIZE];
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
int ret, map, prog;
int ret, map, prog, insn_cnt = ARRAY_SIZE(insns);
memset(&map_attr, 0, sizeof(map_attr));
map_attr.map_type = BPF_MAP_TYPE_ARRAY;
@ -4551,13 +4558,7 @@ static int probe_prog_bind_map(void)
return ret;
}
memset(&prg_attr, 0, sizeof(prg_attr));
prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
prg_attr.insns = insns;
prg_attr.insns_cnt = ARRAY_SIZE(insns);
prg_attr.license = "GPL";
prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, NULL);
if (prog < 0) {
close(map);
return 0;
@ -4602,19 +4603,14 @@ static int probe_module_btf(void)
static int probe_perf_link(void)
{
struct bpf_load_program_attr attr;
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
int prog_fd, link_fd, err;
memset(&attr, 0, sizeof(attr));
attr.prog_type = BPF_PROG_TYPE_TRACEPOINT;
attr.insns = insns;
attr.insns_cnt = ARRAY_SIZE(insns);
attr.license = "GPL";
prog_fd = bpf_load_program_xattr(&attr, NULL, 0);
prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL",
insns, ARRAY_SIZE(insns), NULL);
if (prog_fd < 0)
return -errno;
@ -4687,6 +4683,9 @@ static struct kern_feature_desc {
[FEAT_BTF_DECL_TAG] = {
"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
},
[FEAT_BTF_TYPE_TAG] = {
"BTF_KIND_TYPE_TAG support", probe_kern_btf_type_tag,
},
};
static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
@ -6374,16 +6373,16 @@ static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attac
/* this is called as prog->sec_def->preload_fn for libbpf-supported sec_defs */
static int libbpf_preload_prog(struct bpf_program *prog,
struct bpf_prog_load_params *attr, long cookie)
struct bpf_prog_load_opts *opts, long cookie)
{
enum sec_def_flags def = cookie;
/* old kernels might not support specifying expected_attach_type */
if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
attr->expected_attach_type = 0;
opts->expected_attach_type = 0;
if (def & SEC_SLEEPABLE)
attr->prog_flags |= BPF_F_SLEEPABLE;
opts->prog_flags |= BPF_F_SLEEPABLE;
if ((prog->type == BPF_PROG_TYPE_TRACING ||
prog->type == BPF_PROG_TYPE_LSM ||
@ -6402,21 +6401,22 @@ static int libbpf_preload_prog(struct bpf_program *prog,
/* but by now libbpf common logic is not utilizing
* prog->attach_btf_obj_fd/prog->attach_btf_id anymore because
* this callback is called after attrs were populated by
* libbpf, so this callback has to update attr explicitly here
* this callback is called after opts were populated by
* libbpf, so this callback has to update opts explicitly here
*/
attr->attach_btf_obj_fd = btf_obj_fd;
attr->attach_btf_id = btf_type_id;
opts->attach_btf_obj_fd = btf_obj_fd;
opts->attach_btf_id = btf_type_id;
}
return 0;
}
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
char *license, __u32 kern_version, int *pfd)
static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_program *prog,
struct bpf_insn *insns, int insns_cnt,
const char *license, __u32 kern_version,
int *prog_fd)
{
struct bpf_prog_load_params load_attr = {};
struct bpf_object *obj = prog->obj;
LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
const char *prog_name = NULL;
char *cp, errmsg[STRERR_BUFSIZE];
size_t log_buf_size = 0;
char *log_buf = NULL;
@ -6435,13 +6435,9 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
if (!insns || !insns_cnt)
return -EINVAL;
load_attr.prog_type = prog->type;
load_attr.expected_attach_type = prog->expected_attach_type;
if (kernel_supports(obj, FEAT_PROG_NAME))
load_attr.name = prog->name;
load_attr.insns = insns;
load_attr.insn_cnt = insns_cnt;
load_attr.license = license;
prog_name = prog->name;
load_attr.attach_btf_id = prog->attach_btf_id;
load_attr.attach_prog_fd = prog->attach_prog_fd;
load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
@ -6475,9 +6471,10 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
}
if (obj->gen_loader) {
bpf_gen__prog_load(obj->gen_loader, &load_attr,
bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
license, insns, insns_cnt, &load_attr,
prog - obj->programs);
*pfd = -1;
*prog_fd = -1;
return 0;
}
retry_load:
@ -6490,8 +6487,8 @@ retry_load:
}
load_attr.log_buf = log_buf;
load_attr.log_buf_sz = log_buf_size;
ret = libbpf__bpf_prog_load(&load_attr);
load_attr.log_size = log_buf_size;
ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
if (ret >= 0) {
if (log_buf && load_attr.log_level)
@ -6515,7 +6512,7 @@ retry_load:
}
}
*pfd = ret;
*prog_fd = ret;
ret = 0;
goto out;
}
@ -6537,19 +6534,19 @@ retry_load:
pr_warn("-- BEGIN DUMP LOG ---\n");
pr_warn("\n%s\n", log_buf);
pr_warn("-- END LOG --\n");
} else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
pr_warn("Program too large (%zu insns), at most %d insns\n",
load_attr.insn_cnt, BPF_MAXINSNS);
} else if (insns_cnt >= BPF_MAXINSNS) {
pr_warn("Program too large (%d insns), at most %d insns\n",
insns_cnt, BPF_MAXINSNS);
ret = -LIBBPF_ERRNO__PROG2BIG;
} else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
} else if (prog->type != BPF_PROG_TYPE_KPROBE) {
/* Wrong program type? */
int fd;
load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
load_attr.expected_attach_type = 0;
load_attr.log_buf = NULL;
load_attr.log_buf_sz = 0;
fd = libbpf__bpf_prog_load(&load_attr);
load_attr.log_size = 0;
fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, prog_name, license,
insns, insns_cnt, &load_attr);
if (fd >= 0) {
close(fd);
ret = -LIBBPF_ERRNO__PROGTYPE;
@ -6591,11 +6588,12 @@ static int bpf_program__record_externs(struct bpf_program *prog)
return 0;
}
int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
const char *license, __u32 kern_ver)
{
int err = 0, fd, i;
if (prog->obj->loaded) {
if (obj->loaded) {
pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
return libbpf_err(-EINVAL);
}
@ -6621,10 +6619,11 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
prog->name, prog->instances.nr);
}
if (prog->obj->gen_loader)
if (obj->gen_loader)
bpf_program__record_externs(prog);
err = load_program(prog, prog->insns, prog->insns_cnt,
license, kern_ver, &fd);
err = bpf_object_load_prog_instance(obj, prog,
prog->insns, prog->insns_cnt,
license, kern_ver, &fd);
if (!err)
prog->instances.fds[0] = fd;
goto out;
@ -6652,8 +6651,9 @@ int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
continue;
}
err = load_program(prog, result.new_insn_ptr,
result.new_insn_cnt, license, kern_ver, &fd);
err = bpf_object_load_prog_instance(obj, prog,
result.new_insn_ptr, result.new_insn_cnt,
license, kern_ver, &fd);
if (err) {
pr_warn("Loading the %dth instance of program '%s' failed\n",
i, prog->name);
@ -6670,6 +6670,11 @@ out:
return libbpf_err(err);
}
int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_ver)
{
return bpf_object_load_prog(prog->obj, prog, license, kern_ver);
}
static int
bpf_object__load_progs(struct bpf_object *obj, int log_level)
{
@ -6693,7 +6698,7 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
continue;
}
prog->log_level |= log_level;
err = bpf_program__load(prog, obj->license, obj->kern_version);
err = bpf_object_load_prog(obj, prog, obj->license, obj->kern_version);
if (err)
return err;
}
@ -7733,7 +7738,7 @@ int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
return 0;
err_unpin_maps:
while ((map = bpf_map__prev(map, obj))) {
while ((map = bpf_object__prev_map(obj, map))) {
if (!map->pin_path)
continue;
@ -7813,7 +7818,7 @@ int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
return 0;
err_unpin_programs:
while ((prog = bpf_program__prev(prog, obj))) {
while ((prog = bpf_object__prev_program(obj, prog))) {
char buf[PATH_MAX];
int len;
@ -8154,9 +8159,11 @@ int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
return 0;
}
static int bpf_program_nth_fd(const struct bpf_program *prog, int n);
int bpf_program__fd(const struct bpf_program *prog)
{
return bpf_program__nth_fd(prog, 0);
return bpf_program_nth_fd(prog, 0);
}
size_t bpf_program__size(const struct bpf_program *prog)
@ -8202,7 +8209,10 @@ int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
return 0;
}
int bpf_program__nth_fd(const struct bpf_program *prog, int n)
__attribute__((alias("bpf_program_nth_fd")))
int bpf_program__nth_fd(const struct bpf_program *prog, int n);
static int bpf_program_nth_fd(const struct bpf_program *prog, int n)
{
int fd;
@ -8281,6 +8291,20 @@ void bpf_program__set_expected_attach_type(struct bpf_program *prog,
prog->expected_attach_type = type;
}
__u32 bpf_program__flags(const struct bpf_program *prog)
{
return prog->prog_flags;
}
int bpf_program__set_extra_flags(struct bpf_program *prog, __u32 extra_flags)
{
if (prog->obj->loaded)
return libbpf_err(-EBUSY);
prog->prog_flags |= extra_flags;
return 0;
}
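For reference, a minimal usage sketch of the two new accessors above; the program name and the flag choice are illustrative, not taken from this patch:

/* Sketch: opt a program into sleepable mode before bpf_object__load().
 * "my_prog" is a hypothetical program name. */
struct bpf_program *p = bpf_object__find_program_by_name(obj, "my_prog");

if (p && !(bpf_program__flags(p) & BPF_F_SLEEPABLE))
	bpf_program__set_extra_flags(p, BPF_F_SLEEPABLE);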
#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
.sec = sec_pfx, \
.prog_type = BPF_PROG_TYPE_##ptype, \
@ -9028,7 +9052,10 @@ int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
pr_warn("error: inner_map_fd already specified\n");
return libbpf_err(-EINVAL);
}
zfree(&map->inner_map);
if (map->inner_map) {
bpf_map__destroy(map->inner_map);
zfree(&map->inner_map);
}
map->inner_map_fd = fd;
return 0;
}
@ -9145,21 +9172,12 @@ long libbpf_get_error(const void *ptr)
return -errno;
}
int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd)
{
struct bpf_prog_load_attr attr;
memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
attr.file = file;
attr.prog_type = type;
attr.expected_attach_type = 0;
return bpf_prog_load_xattr(&attr, pobj, prog_fd);
}
__attribute__((alias("bpf_prog_load_xattr2")))
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
struct bpf_object **pobj, int *prog_fd)
struct bpf_object **pobj, int *prog_fd);
static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr,
struct bpf_object **pobj, int *prog_fd)
{
struct bpf_object_open_attr open_attr = {};
struct bpf_program *prog, *first_prog = NULL;
@ -9230,6 +9248,20 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
return 0;
}
COMPAT_VERSION(bpf_prog_load_deprecated, bpf_prog_load, LIBBPF_0.0.1)
int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd)
{
struct bpf_prog_load_attr attr;
memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
attr.file = file;
attr.prog_type = type;
attr.expected_attach_type = 0;
return bpf_prog_load_xattr2(&attr, pobj, prog_fd);
}
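COMPAT_VERSION() and DEFAULT_VERSION() are libbpf-internal helpers; roughly, and this is an assumed sketch of the conventional GNU symbol-versioning expansion rather than a quote from this patch, they boil down to:

/* Assumed expansion: '@' pins a compat symbol to an old version node,
 * while '@@' marks the default version that new links resolve to. */
#define COMPAT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@" #version);
#define DEFAULT_VERSION(internal_name, api_name, version) \
	asm(".symver " #internal_name "," #api_name "@@" #version);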
struct bpf_link {
int (*detach)(struct bpf_link *link);
void (*dealloc)(struct bpf_link *link);
@ -10575,11 +10607,18 @@ error:
static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
struct perf_buffer_params *p);
struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
const struct perf_buffer_opts *opts)
DEFAULT_VERSION(perf_buffer__new_v0_6_0, perf_buffer__new, LIBBPF_0.6.0)
struct perf_buffer *perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
perf_buffer_sample_fn sample_cb,
perf_buffer_lost_fn lost_cb,
void *ctx,
const struct perf_buffer_opts *opts)
{
struct perf_buffer_params p = {};
struct perf_event_attr attr = { 0, };
struct perf_event_attr attr = {};
if (!OPTS_VALID(opts, perf_buffer_opts))
return libbpf_err_ptr(-EINVAL);
attr.config = PERF_COUNT_SW_BPF_OUTPUT;
attr.type = PERF_TYPE_SOFTWARE;
@ -10588,29 +10627,62 @@ struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
attr.wakeup_events = 1;
p.attr = &attr;
p.sample_cb = opts ? opts->sample_cb : NULL;
p.lost_cb = opts ? opts->lost_cb : NULL;
p.ctx = opts ? opts->ctx : NULL;
p.sample_cb = sample_cb;
p.lost_cb = lost_cb;
p.ctx = ctx;
return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt,
const struct perf_buffer_raw_opts *opts)
COMPAT_VERSION(perf_buffer__new_deprecated, perf_buffer__new, LIBBPF_0.0.4)
struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt,
const struct perf_buffer_opts *opts)
{
return perf_buffer__new_v0_6_0(map_fd, page_cnt,
opts ? opts->sample_cb : NULL,
opts ? opts->lost_cb : NULL,
opts ? opts->ctx : NULL,
NULL);
}
DEFAULT_VERSION(perf_buffer__new_raw_v0_6_0, perf_buffer__new_raw, LIBBPF_0.6.0)
struct perf_buffer *perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt,
struct perf_event_attr *attr,
perf_buffer_event_fn event_cb, void *ctx,
const struct perf_buffer_raw_opts *opts)
{
struct perf_buffer_params p = {};
p.attr = opts->attr;
p.event_cb = opts->event_cb;
p.ctx = opts->ctx;
p.cpu_cnt = opts->cpu_cnt;
p.cpus = opts->cpus;
p.map_keys = opts->map_keys;
if (page_cnt == 0 || !attr)
return libbpf_err_ptr(-EINVAL);
if (!OPTS_VALID(opts, perf_buffer_raw_opts))
return libbpf_err_ptr(-EINVAL);
p.attr = attr;
p.event_cb = event_cb;
p.ctx = ctx;
p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
p.cpus = OPTS_GET(opts, cpus, NULL);
p.map_keys = OPTS_GET(opts, map_keys, NULL);
return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
}
COMPAT_VERSION(perf_buffer__new_raw_deprecated, perf_buffer__new_raw, LIBBPF_0.0.4)
struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt,
const struct perf_buffer_raw_opts *opts)
{
LIBBPF_OPTS(perf_buffer_raw_opts, inner_opts,
.cpu_cnt = opts->cpu_cnt,
.cpus = opts->cpus,
.map_keys = opts->map_keys,
);
return perf_buffer__new_raw_v0_6_0(map_fd, page_cnt, opts->attr,
opts->event_cb, opts->ctx, &inner_opts);
}
static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
struct perf_buffer_params *p)
{

View File

@ -262,8 +262,8 @@ LIBBPF_API const struct bpf_insn *bpf_program__insns(const struct bpf_program *p
*/
LIBBPF_API size_t bpf_program__insn_cnt(const struct bpf_program *prog);
LIBBPF_API int bpf_program__load(struct bpf_program *prog, char *license,
__u32 kern_version);
LIBBPF_DEPRECATED_SINCE(0, 6, "use bpf_object__load() instead")
LIBBPF_API int bpf_program__load(struct bpf_program *prog, const char *license, __u32 kern_version);
LIBBPF_API int bpf_program__fd(const struct bpf_program *prog);
LIBBPF_DEPRECATED_SINCE(0, 7, "multi-instance bpf_program support is deprecated")
LIBBPF_API int bpf_program__pin_instance(struct bpf_program *prog,
@ -431,7 +431,6 @@ bpf_program__attach_iter(const struct bpf_program *prog,
* one instance. In this case bpf_program__fd(prog) is equal to
* bpf_program__nth_fd(prog, 0).
*/
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__insns() for getting bpf_program instructions")
struct bpf_prog_prep_result {
/*
* If not NULL, load new instruction array.
@ -494,6 +493,9 @@ LIBBPF_API void
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
LIBBPF_API __u32 bpf_program__flags(const struct bpf_program *prog);
LIBBPF_API int bpf_program__set_extra_flags(struct bpf_program *prog, __u32 extra_flags);
LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
const char *attach_func_name);
@ -676,8 +678,9 @@ struct bpf_prog_load_attr {
LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
struct bpf_object **pobj, int *prog_fd);
LIBBPF_API int bpf_prog_load(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd);
LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_object__open() and bpf_object__load() instead")
LIBBPF_API int bpf_prog_load_deprecated(const char *file, enum bpf_prog_type type,
struct bpf_object **pobj, int *prog_fd);
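As the deprecation note says, the replacement for this high-level loader is the plain open/load pair; a minimal sketch, with error handling condensed:

struct bpf_object *obj;
int err;

obj = bpf_object__open(file);		/* file: path to the BPF .o */
err = libbpf_get_error(obj);
if (err)
	return err;
err = bpf_object__load(obj);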
/* XDP related API */
struct xdp_link_info {
@ -775,18 +778,52 @@ typedef void (*perf_buffer_lost_fn)(void *ctx, int cpu, __u64 cnt);
/* common use perf buffer options */
struct perf_buffer_opts {
/* if specified, sample_cb is called for each sample */
perf_buffer_sample_fn sample_cb;
/* if specified, lost_cb is called for each batch of lost samples */
perf_buffer_lost_fn lost_cb;
/* ctx is provided to sample_cb and lost_cb */
void *ctx;
union {
size_t sz;
struct { /* DEPRECATED: will be removed in v1.0 */
/* if specified, sample_cb is called for each sample */
perf_buffer_sample_fn sample_cb;
/* if specified, lost_cb is called for each batch of lost samples */
perf_buffer_lost_fn lost_cb;
/* ctx is provided to sample_cb and lost_cb */
void *ctx;
};
};
};
#define perf_buffer_opts__last_field sz
/**
* @brief **perf_buffer__new()** creates BPF perfbuf manager for a specified
* BPF_PERF_EVENT_ARRAY map
* @param map_fd FD of BPF_PERF_EVENT_ARRAY BPF map that will be used by BPF
* code to send data over to user-space
* @param page_cnt number of memory pages allocated for each per-CPU buffer
* @param sample_cb function called on each received data record
* @param lost_cb function called when record loss has occurred
* @param ctx user-provided extra context passed into *sample_cb* and *lost_cb*
* @return a new instance of struct perf_buffer on success, NULL on error with
* *errno* containing an error code
*/
LIBBPF_API struct perf_buffer *
perf_buffer__new(int map_fd, size_t page_cnt,
perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
const struct perf_buffer_opts *opts);
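A usage sketch of the new callback-as-parameters form; map fd, page count and callback are illustrative:

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* consume one record */
}

static int open_perfbuf(int map_fd)
{
	struct perf_buffer *pb;

	pb = perf_buffer__new(map_fd, 8 /* pages per CPU buffer */, on_sample,
			      NULL /* lost_cb */, NULL /* ctx */, NULL /* opts */);
	if (!pb)
		return -errno;	/* NULL on error with errno set, per the doc above */
	return perf_buffer__poll(pb, 100 /* timeout, ms */);
}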
LIBBPF_API struct perf_buffer *
perf_buffer__new_v0_6_0(int map_fd, size_t page_cnt,
perf_buffer_sample_fn sample_cb, perf_buffer_lost_fn lost_cb, void *ctx,
const struct perf_buffer_opts *opts);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new() instead")
struct perf_buffer *perf_buffer__new_deprecated(int map_fd, size_t page_cnt,
const struct perf_buffer_opts *opts);
#define perf_buffer__new(...) ___libbpf_overload(___perf_buffer_new, __VA_ARGS__)
#define ___perf_buffer_new6(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts) \
perf_buffer__new(map_fd, page_cnt, sample_cb, lost_cb, ctx, opts)
#define ___perf_buffer_new3(map_fd, page_cnt, opts) \
perf_buffer__new_deprecated(map_fd, page_cnt, opts)
enum bpf_perf_event_ret {
LIBBPF_PERF_EVENT_DONE = 0,
LIBBPF_PERF_EVENT_ERROR = -1,
@ -800,12 +837,21 @@ typedef enum bpf_perf_event_ret
/* raw perf buffer options, giving most power and control */
struct perf_buffer_raw_opts {
/* perf event attrs passed directly into perf_event_open() */
struct perf_event_attr *attr;
/* raw event callback */
perf_buffer_event_fn event_cb;
/* ctx is provided to event_cb */
void *ctx;
union {
struct {
size_t sz;
long :0;
long :0;
};
struct { /* DEPRECATED: will be removed in v1.0 */
/* perf event attrs passed directly into perf_event_open() */
struct perf_event_attr *attr;
/* raw event callback */
perf_buffer_event_fn event_cb;
/* ctx is provided to event_cb */
void *ctx;
};
};
/* if cpu_cnt == 0, open all on all possible CPUs (up to the number of
* max_entries of given PERF_EVENT_ARRAY map)
*/
@ -815,11 +861,28 @@ struct perf_buffer_raw_opts {
/* if cpu_cnt > 0, map_keys specify map keys to set per-CPU FDs for */
int *map_keys;
};
#define perf_buffer_raw_opts__last_field map_keys
LIBBPF_API struct perf_buffer *
perf_buffer__new_raw(int map_fd, size_t page_cnt,
perf_buffer__new_raw(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
perf_buffer_event_fn event_cb, void *ctx,
const struct perf_buffer_raw_opts *opts);
LIBBPF_API struct perf_buffer *
perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt, struct perf_event_attr *attr,
perf_buffer_event_fn event_cb, void *ctx,
const struct perf_buffer_raw_opts *opts);
LIBBPF_API LIBBPF_DEPRECATED_SINCE(0, 7, "use new variant of perf_buffer__new_raw() instead")
struct perf_buffer *perf_buffer__new_raw_deprecated(int map_fd, size_t page_cnt,
const struct perf_buffer_raw_opts *opts);
#define perf_buffer__new_raw(...) ___libbpf_overload(___perf_buffer_new_raw, __VA_ARGS__)
#define ___perf_buffer_new_raw6(map_fd, page_cnt, attr, event_cb, ctx, opts) \
perf_buffer__new_raw(map_fd, page_cnt, attr, event_cb, ctx, opts)
#define ___perf_buffer_new_raw3(map_fd, page_cnt, opts) \
perf_buffer__new_raw_deprecated(map_fd, page_cnt, opts)
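The raw variant follows the same pattern, with the perf_event_attr and event callback as explicit parameters; a sketch mirroring the benchmark conversion later in this diff (function and callback names hypothetical):

static struct perf_buffer *open_raw_perfbuf(int map_fd, perf_buffer_event_fn event_cb)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_BPF_OUTPUT,
	};

	return perf_buffer__new_raw(map_fd, 8, &attr, event_cb,
				    NULL /* ctx */, NULL /* opts */);
}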
LIBBPF_API void perf_buffer__free(struct perf_buffer *pb);
LIBBPF_API int perf_buffer__epoll_fd(const struct perf_buffer *pb);
LIBBPF_API int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms);
@ -918,12 +981,15 @@ struct bpf_prog_info_linear {
__u8 data[];
};
LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays);
LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API void
bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear);
LIBBPF_DEPRECATED_SINCE(0, 6, "use a custom linear prog_info wrapper")
LIBBPF_API void
bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear);

View File

@ -395,10 +395,23 @@ LIBBPF_0.6.0 {
bpf_object__next_program;
bpf_object__prev_map;
bpf_object__prev_program;
bpf_prog_load_deprecated;
bpf_prog_load;
bpf_program__flags;
bpf_program__insn_cnt;
bpf_program__insns;
bpf_program__set_extra_flags;
btf__add_btf;
btf__add_decl_tag;
btf__add_type_tag;
btf__dedup;
btf__dedup_deprecated;
btf__raw_data;
btf__type_cnt;
btf_dump__new;
btf_dump__new_deprecated;
perf_buffer__new;
perf_buffer__new_deprecated;
perf_buffer__new_raw;
perf_buffer__new_raw_deprecated;
} LIBBPF_0.5.0;

View File

@ -41,6 +41,18 @@
#define __LIBBPF_MARK_DEPRECATED_0_7(X)
#endif
/* This set of internal macros allows doing "function overloading" based on
* the number of arguments provided by the user, in a backwards-compatible way
* during the transition to libbpf 1.0.
* It's an ugly but necessary evil that will be cleaned up when we get to 1.0.
* See bpf_prog_load() overload for example.
*/
#define ___libbpf_cat(A, B) A ## B
#define ___libbpf_select(NAME, NUM) ___libbpf_cat(NAME, NUM)
#define ___libbpf_nth(_1, _2, _3, _4, _5, _6, N, ...) N
#define ___libbpf_cnt(...) ___libbpf_nth(__VA_ARGS__, 6, 5, 4, 3, 2, 1)
#define ___libbpf_overload(NAME, ...) ___libbpf_select(NAME, ___libbpf_cnt(__VA_ARGS__))(__VA_ARGS__)
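To see the dispatch concretely, a sketch with a hypothetical FOO() macro family; the perf_buffer__new() and bpf_prog_load() overloads in this series use exactly this trick:

/* ___libbpf_cnt(a, b, c) evaluates to 3, so FOO(a, b, c) selects ___FOO3. */
#define ___FOO3(a, b, c)		foo_new(a, b, c)
#define ___FOO6(a, b, c, d, e, f)	foo_old(a, b, c, d, e, f)
#define FOO(...) ___libbpf_overload(___FOO, __VA_ARGS__)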
/* Helper macro to declare and initialize libbpf options struct
*
* This dance with uninitialized declaration, followed by memset to zero,
@ -54,7 +66,7 @@
* including any extra padding, it with memset() and then assigns initial
* values provided by users in struct initializer-syntax as varargs.
*/
#define DECLARE_LIBBPF_OPTS(TYPE, NAME, ...) \
#define LIBBPF_OPTS(TYPE, NAME, ...) \
struct TYPE NAME = ({ \
memset(&NAME, 0, sizeof(struct TYPE)); \
(struct TYPE) { \

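Typical use of the renamed macro, matching the call sites converted elsewhere in this diff (log_buf, insns and insn_cnt are assumed to exist in scope):

LIBBPF_OPTS(bpf_prog_load_opts, opts,
	.log_buf = log_buf,
	.log_size = sizeof(log_buf),
);
int fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, &opts);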
View File

@ -73,6 +73,8 @@
BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz)
#define BTF_TYPE_DECL_TAG_ENC(value, type, component_idx) \
BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_DECL_TAG, 0, 0), type), (component_idx)
#define BTF_TYPE_TYPE_TAG_ENC(value, type) \
BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TYPE_TAG, 0, 0), type)
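For instance, the raw encoding of a type-tagged pointer chain, mirroring the unit tests added later in this series (which spell the helper BTF_TYPE_TAG_ENC), looks like:

/* ptr -> type_tag("tag") -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4),	/* [1] int */
BTF_TYPE_TYPE_TAG_ENC(NAME_TBD, 1),		/* [2] type_tag -> [1] */
BTF_PTR_ENC(2),					/* [3] ptr -> [2] */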
#ifndef likely
#define likely(x) __builtin_expect(!!(x), 1)
@ -276,37 +278,6 @@ int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz);
int libbpf__load_raw_btf(const char *raw_types, size_t types_len,
const char *str_sec, size_t str_len);
struct bpf_prog_load_params {
enum bpf_prog_type prog_type;
enum bpf_attach_type expected_attach_type;
const char *name;
const struct bpf_insn *insns;
size_t insn_cnt;
const char *license;
__u32 kern_version;
__u32 attach_prog_fd;
__u32 attach_btf_obj_fd;
__u32 attach_btf_id;
__u32 prog_ifindex;
__u32 prog_btf_fd;
__u32 prog_flags;
__u32 func_info_rec_size;
const void *func_info;
__u32 func_info_cnt;
__u32 line_info_rec_size;
const void *line_info;
__u32 line_info_cnt;
__u32 log_level;
char *log_buf;
size_t log_buf_sz;
int *fd_array;
};
int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr);
struct bpf_create_map_params {
const char *name;
enum bpf_map_type map_type;

View File

@ -69,6 +69,7 @@ enum libbpf_strict_mode {
LIBBPF_API int libbpf_set_strict_mode(enum libbpf_strict_mode mode);
#define DECLARE_LIBBPF_OPTS LIBBPF_OPTS
#ifdef __cplusplus
} /* extern "C" */

View File

@ -68,21 +68,21 @@ static void
probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
size_t insns_cnt, char *buf, size_t buf_len, __u32 ifindex)
{
struct bpf_load_program_attr xattr = {};
LIBBPF_OPTS(bpf_prog_load_opts, opts);
int fd;
switch (prog_type) {
case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
xattr.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
opts.expected_attach_type = BPF_CGROUP_INET4_CONNECT;
break;
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
xattr.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
opts.expected_attach_type = BPF_CGROUP_GETSOCKOPT;
break;
case BPF_PROG_TYPE_SK_LOOKUP:
xattr.expected_attach_type = BPF_SK_LOOKUP;
opts.expected_attach_type = BPF_SK_LOOKUP;
break;
case BPF_PROG_TYPE_KPROBE:
xattr.kern_version = get_kernel_version();
opts.kern_version = get_kernel_version();
break;
case BPF_PROG_TYPE_UNSPEC:
case BPF_PROG_TYPE_SOCKET_FILTER:
@ -115,13 +115,11 @@ probe_load(enum bpf_prog_type prog_type, const struct bpf_insn *insns,
break;
}
xattr.prog_type = prog_type;
xattr.insns = insns;
xattr.insns_cnt = insns_cnt;
xattr.license = "GPL";
xattr.prog_ifindex = ifindex;
opts.prog_ifindex = ifindex;
opts.log_buf = buf;
opts.log_size = buf_len;
fd = bpf_load_program_xattr(&xattr, buf, buf_len);
fd = bpf_prog_load(prog_type, NULL, "GPL", insns, insns_cnt, &opts);
if (fd >= 0)
close(fd);
}

View File

@ -2650,6 +2650,7 @@ static int emit_elf_data_sec(struct bpf_linker *linker, const char *sec_name,
static int finalize_btf(struct bpf_linker *linker)
{
LIBBPF_OPTS(btf_dedup_opts, opts);
struct btf *btf = linker->btf;
const void *raw_data;
int i, j, id, err;
@ -2686,7 +2687,8 @@ static int finalize_btf(struct bpf_linker *linker)
return err;
}
err = btf__dedup(linker->btf, linker->btf_ext, NULL);
opts.btf_ext = linker->btf_ext;
err = btf__dedup(linker->btf, &opts);
if (err) {
pr_warn("BTF dedup failed: %d\n", err);
return err;

View File

@ -364,7 +364,6 @@ int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area,
static enum xsk_prog get_xsk_prog(void)
{
enum xsk_prog detected = XSK_PROG_FALLBACK;
struct bpf_load_program_attr prog_attr;
struct bpf_create_map_attr map_attr;
__u32 size_out, retval, duration;
char data_in = 0, data_out;
@ -375,7 +374,7 @@ static enum xsk_prog get_xsk_prog(void)
BPF_EMIT_CALL(BPF_FUNC_redirect_map),
BPF_EXIT_INSN(),
};
int prog_fd, map_fd, ret;
int prog_fd, map_fd, ret, insn_cnt = ARRAY_SIZE(insns);
memset(&map_attr, 0, sizeof(map_attr));
map_attr.map_type = BPF_MAP_TYPE_XSKMAP;
@ -389,13 +388,7 @@ static enum xsk_prog get_xsk_prog(void)
insns[0].imm = map_fd;
memset(&prog_attr, 0, sizeof(prog_attr));
prog_attr.prog_type = BPF_PROG_TYPE_XDP;
prog_attr.insns = insns;
prog_attr.insns_cnt = ARRAY_SIZE(insns);
prog_attr.license = "GPL";
prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
if (prog_fd < 0) {
close(map_fd);
return detected;
@ -495,10 +488,13 @@ static int xsk_load_xdp_prog(struct xsk_socket *xsk)
};
struct bpf_insn *progs[] = {prog, prog_redirect_flags};
enum xsk_prog option = get_xsk_prog();
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.log_buf = log_buf,
.log_size = log_buf_size,
);
prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, progs[option], insns_cnt[option],
"LGPL-2.1 or BSD-2-Clause", 0, log_buf,
log_buf_size);
prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "LGPL-2.1 or BSD-2-Clause",
progs[option], insns_cnt[option], &opts);
if (prog_fd < 0) {
pr_warn("BPF log buffer:\n%s", log_buf);
return prog_fd;
@ -725,14 +721,12 @@ static int xsk_link_lookup(int ifindex, __u32 *prog_id, int *link_fd)
static bool xsk_probe_bpf_link(void)
{
DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
.flags = XDP_FLAGS_SKB_MODE);
struct bpf_load_program_attr prog_attr;
LIBBPF_OPTS(bpf_link_create_opts, opts, .flags = XDP_FLAGS_SKB_MODE);
struct bpf_insn insns[2] = {
BPF_MOV64_IMM(BPF_REG_0, XDP_PASS),
BPF_EXIT_INSN()
};
int prog_fd, link_fd = -1;
int prog_fd, link_fd = -1, insn_cnt = ARRAY_SIZE(insns);
int ifindex_lo = 1;
bool ret = false;
int err;
@ -744,13 +738,7 @@ static bool xsk_probe_bpf_link(void)
if (link_fd >= 0)
return true;
memset(&prog_attr, 0, sizeof(prog_attr));
prog_attr.prog_type = BPF_PROG_TYPE_XDP;
prog_attr.insns = insns;
prog_attr.insns_cnt = ARRAY_SIZE(insns);
prog_attr.license = "GPL";
prog_fd = bpf_load_program_xattr(&prog_attr, NULL, 0);
prog_fd = bpf_prog_load(BPF_PROG_TYPE_XDP, NULL, "GPL", insns, insn_cnt, NULL);
if (prog_fd < 0)
return ret;

View File

@ -23,9 +23,8 @@ BPF_GCC ?= $(shell command -v bpf-gcc;)
SAN_CFLAGS ?=
CFLAGS += -g -O0 -rdynamic -Wall $(GENFLAGS) $(SAN_CFLAGS) \
-I$(CURDIR) -I$(INCLUDE_DIR) -I$(GENDIR) -I$(LIBDIR) \
-I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT) \
-Dbpf_prog_load=bpf_prog_test_load \
-Dbpf_load_program=bpf_test_load_program
-I$(TOOLSINCDIR) -I$(APIDIR) -I$(OUTPUT)
LDFLAGS += $(SAN_CFLAGS)
LDLIBS += -lcap -lelf -lz -lrt -lpthread
# Silence some warnings when compiled with clang
@ -46,10 +45,8 @@ ifneq ($(BPF_GCC),)
TEST_GEN_PROGS += test_progs-bpf_gcc
endif
TEST_GEN_FILES = test_lwt_ip_encap.o \
test_tc_edt.o
TEST_FILES = xsk_prereqs.sh \
$(wildcard progs/btf_dump_test_case_*.c)
TEST_GEN_FILES = test_lwt_ip_encap.o test_tc_edt.o
TEST_FILES = xsk_prereqs.sh $(wildcard progs/btf_dump_test_case_*.c)
# Order correspond to 'make run_tests' order
TEST_PROGS := test_kmod.sh \
@ -108,7 +105,10 @@ endif
OVERRIDE_TARGETS := 1
override define CLEAN
$(call msg,CLEAN)
$(Q)$(RM) -r $(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(EXTRA_CLEAN)
$(Q)$(RM) -r $(TEST_GEN_PROGS)
$(Q)$(RM) -r $(TEST_GEN_PROGS_EXTENDED)
$(Q)$(RM) -r $(TEST_GEN_FILES)
$(Q)$(RM) -r $(EXTRA_CLEAN)
$(Q)$(MAKE) -C bpf_testmod clean
$(Q)$(MAKE) docs-clean
endef
@ -170,7 +170,7 @@ $(OUTPUT)/%:%.c
$(OUTPUT)/urandom_read: urandom_read.c
$(call msg,BINARY,,$@)
$(Q)$(CC) $(LDFLAGS) -o $@ $< $(LDLIBS) -Wl,--build-id=sha1
$(Q)$(CC) $(LDFLAGS) $< $(LDLIBS) -Wl,--build-id=sha1 -o $@
$(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_testmod/*.[ch])
$(call msg,MOD,,$@)
@ -178,10 +178,6 @@ $(OUTPUT)/bpf_testmod.ko: $(VMLINUX_BTF) $(wildcard bpf_testmod/Makefile bpf_tes
$(Q)$(MAKE) $(submake_extras) -C bpf_testmod
$(Q)cp bpf_testmod/bpf_testmod.ko $@
$(OUTPUT)/test_stub.o: test_stub.c $(BPFOBJ)
$(call msg,CC,,$@)
$(Q)$(CC) -c $(CFLAGS) -o $@ $<
DEFAULT_BPFTOOL := $(HOST_SCRATCH_DIR)/sbin/bpftool
$(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
@ -194,18 +190,24 @@ $(OUTPUT)/runqslower: $(BPFOBJ) | $(DEFAULT_BPFTOOL) $(RUNQSLOWER_OUTPUT)
TEST_GEN_PROGS_EXTENDED += $(DEFAULT_BPFTOOL)
$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(BPFOBJ)
$(TEST_GEN_PROGS) $(TEST_GEN_PROGS_EXTENDED): $(BPFOBJ)
$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c
$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c
$(OUTPUT)/test_sock: cgroup_helpers.c
$(OUTPUT)/test_sock_addr: cgroup_helpers.c
$(OUTPUT)/test_sockmap: cgroup_helpers.c
$(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c
$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c
$(OUTPUT)/test_sock_fields: cgroup_helpers.c
$(OUTPUT)/test_sysctl: cgroup_helpers.c
$(OUTPUT)/test_dev_cgroup: cgroup_helpers.c testing_helpers.o
$(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c testing_helpers.o
$(OUTPUT)/test_sock: cgroup_helpers.c testing_helpers.o
$(OUTPUT)/test_sock_addr: cgroup_helpers.c testing_helpers.o
$(OUTPUT)/test_sockmap: cgroup_helpers.c testing_helpers.o
$(OUTPUT)/test_tcpnotify_user: cgroup_helpers.c trace_helpers.c testing_helpers.o
$(OUTPUT)/get_cgroup_id_user: cgroup_helpers.c testing_helpers.o
$(OUTPUT)/test_cgroup_storage: cgroup_helpers.c testing_helpers.o
$(OUTPUT)/test_sock_fields: cgroup_helpers.c testing_helpers.o
$(OUTPUT)/test_sysctl: cgroup_helpers.c testing_helpers.o
$(OUTPUT)/test_tag: testing_helpers.o
$(OUTPUT)/test_lirc_mode2_user: testing_helpers.o
$(OUTPUT)/xdping: testing_helpers.o
$(OUTPUT)/flow_dissector_load: testing_helpers.o
$(OUTPUT)/test_maps: testing_helpers.o
$(OUTPUT)/test_verifier: testing_helpers.o
BPFTOOL ?= $(DEFAULT_BPFTOOL)
$(DEFAULT_BPFTOOL): $(wildcard $(BPFTOOLDIR)/*.[ch] $(BPFTOOLDIR)/Makefile) \
@ -231,16 +233,16 @@ docs-clean:
prefix= OUTPUT=$(OUTPUT)/ DESTDIR=$(OUTPUT)/ $@
$(BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
../../../include/uapi/linux/bpf.h \
$(APIDIR)/linux/bpf.h \
| $(BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) OUTPUT=$(BUILD_DIR)/libbpf/ \
EXTRA_CFLAGS='-g -O0' \
DESTDIR=$(SCRATCH_DIR) prefix= all install_headers
ifneq ($(BPFOBJ),$(HOST_BPFOBJ))
$(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
../../../include/uapi/linux/bpf.h \
| $(HOST_BUILD_DIR)/libbpf
$(HOST_BPFOBJ): $(wildcard $(BPFDIR)/*.[ch] $(BPFDIR)/Makefile) \
$(APIDIR)/linux/bpf.h \
| $(HOST_BUILD_DIR)/libbpf
$(Q)$(MAKE) $(submake_extras) -C $(BPFDIR) \
EXTRA_CFLAGS='-g -O0' \
OUTPUT=$(HOST_BUILD_DIR)/libbpf/ CC=$(HOSTCC) LD=$(HOSTLD) \
@ -304,12 +306,12 @@ $(OUTPUT)/flow_dissector_load.o: flow_dissector_load.h
# $3 - CFLAGS
define CLANG_BPF_BUILD_RULE
$(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
$(Q)$(CLANG) $3 -O2 -target bpf -c $1 -o $2 -mcpu=v3
$(Q)$(CLANG) $3 -O2 -target bpf -c $1 -mcpu=v3 -o $2
endef
# Similar to CLANG_BPF_BUILD_RULE, but with disabled alu32
define CLANG_NOALU32_BPF_BUILD_RULE
$(call msg,CLNG-BPF,$(TRUNNER_BINARY),$2)
$(Q)$(CLANG) $3 -O2 -target bpf -c $1 -o $2 -mcpu=v2
$(Q)$(CLANG) $3 -O2 -target bpf -c $1 -mcpu=v2 -o $2
endef
# Build BPF object using GCC
define GCC_BPF_BUILD_RULE
@ -471,13 +473,12 @@ TRUNNER_TESTS_DIR := prog_tests
TRUNNER_BPF_PROGS_DIR := progs
TRUNNER_EXTRA_SOURCES := test_progs.c cgroup_helpers.c trace_helpers.c \
network_helpers.c testing_helpers.c \
btf_helpers.c flow_dissector_load.h
btf_helpers.c flow_dissector_load.h
TRUNNER_EXTRA_FILES := $(OUTPUT)/urandom_read $(OUTPUT)/bpf_testmod.ko \
ima_setup.sh \
$(wildcard progs/btf_dump_test_case_*.c)
TRUNNER_BPF_BUILD_RULE := CLANG_BPF_BUILD_RULE
TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS)
TRUNNER_BPF_CFLAGS += -DENABLE_ATOMICS_TESTS
TRUNNER_BPF_CFLAGS := $(BPF_CFLAGS) $(CLANG_CFLAGS) -DENABLE_ATOMICS_TESTS
$(eval $(call DEFINE_TEST_RUNNER,test_progs))
# Define test_progs-no_alu32 test runner.
@ -539,7 +540,7 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \
$(OUTPUT)/bench_ringbufs.o \
$(OUTPUT)/bench_bloom_filter_map.o
$(call msg,BINARY,,$@)
$(Q)$(CC) $(LDFLAGS) -o $@ $(filter %.a %.o,$^) $(LDLIBS)
$(Q)$(CC) $(LDFLAGS) $(filter %.a %.o,$^) $(LDLIBS) -o $@
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \

View File

@ -204,16 +204,17 @@ __ https://reviews.llvm.org/D93563
btf_tag test and Clang version
==============================
The btf_tag selftest require LLVM support to recognize the btf_decl_tag attribute.
It was introduced in `Clang 14`__.
The btf_tag selftest requires LLVM support to recognize the btf_decl_tag and
btf_type_tag attributes. They are introduced in `Clang 14` [0_, 1_].
Without it, the btf_tag selftest will be skipped and you will observe:
Without them, the btf_tag selftest will be skipped and you will observe:
.. code-block:: console
#<test_num> btf_tag:SKIP
__ https://reviews.llvm.org/D111588
.. _0: https://reviews.llvm.org/D111588
.. _1: https://reviews.llvm.org/D111199
Clang dependencies for static linking tests
===========================================

View File

@ -63,29 +63,34 @@ static const struct argp_option opts[] = {
static error_t parse_arg(int key, char *arg, struct argp_state *state)
{
long ret;
switch (key) {
case ARG_NR_ENTRIES:
args.nr_entries = strtol(arg, NULL, 10);
if (args.nr_entries == 0) {
ret = strtol(arg, NULL, 10);
if (ret < 1 || ret > UINT_MAX) {
fprintf(stderr, "Invalid nr_entries count.");
argp_usage(state);
}
args.nr_entries = ret;
break;
case ARG_NR_HASH_FUNCS:
args.nr_hash_funcs = strtol(arg, NULL, 10);
if (args.nr_hash_funcs == 0 || args.nr_hash_funcs > 15) {
ret = strtol(arg, NULL, 10);
if (ret < 1 || ret > 15) {
fprintf(stderr,
"The bloom filter must use 1 to 15 hash functions.");
argp_usage(state);
}
args.nr_hash_funcs = ret;
break;
case ARG_VALUE_SIZE:
args.value_size = strtol(arg, NULL, 10);
if (args.value_size < 2 || args.value_size > 256) {
ret = strtol(arg, NULL, 10);
if (ret < 2 || ret > 256) {
fprintf(stderr,
"Invalid value size. Must be between 2 and 256 bytes");
argp_usage(state);
}
args.value_size = ret;
break;
default:
return ARGP_ERR_UNKNOWN;

View File

@ -394,11 +394,6 @@ static void perfbuf_libbpf_setup()
{
struct perfbuf_libbpf_ctx *ctx = &perfbuf_libbpf_ctx;
struct perf_event_attr attr;
struct perf_buffer_raw_opts pb_opts = {
.event_cb = perfbuf_process_sample_raw,
.ctx = (void *)(long)0,
.attr = &attr,
};
struct bpf_link *link;
ctx->skel = perfbuf_setup_skeleton();
@ -423,7 +418,8 @@ static void perfbuf_libbpf_setup()
}
ctx->perfbuf = perf_buffer__new_raw(bpf_map__fd(ctx->skel->maps.perfbuf),
args.perfbuf_sz, &pb_opts);
args.perfbuf_sz, &attr,
perfbuf_process_sample_raw, NULL, NULL);
if (!ctx->perfbuf) {
fprintf(stderr, "failed to create perfbuf\n");
exit(1);

View File

@ -25,11 +25,12 @@ static const char * const btf_kind_str_mapping[] = {
[BTF_KIND_DATASEC] = "DATASEC",
[BTF_KIND_FLOAT] = "FLOAT",
[BTF_KIND_DECL_TAG] = "DECL_TAG",
[BTF_KIND_TYPE_TAG] = "TYPE_TAG",
};
static const char *btf_kind_str(__u16 kind)
{
if (kind > BTF_KIND_DECL_TAG)
if (kind > BTF_KIND_TYPE_TAG)
return "UNKNOWN";
return btf_kind_str_mapping[kind];
}
@ -109,6 +110,7 @@ int fprintf_btf_type_raw(FILE *out, const struct btf *btf, __u32 id)
case BTF_KIND_VOLATILE:
case BTF_KIND_RESTRICT:
case BTF_KIND_TYPEDEF:
case BTF_KIND_TYPE_TAG:
fprintf(out, " type_id=%u", t->type);
break;
case BTF_KIND_ARRAY: {
@ -238,7 +240,6 @@ const char *btf_type_c_dump(const struct btf *btf)
static char buf[16 * 1024];
FILE *buf_file;
struct btf_dump *d = NULL;
struct btf_dump_opts opts = {};
int err, i;
buf_file = fmemopen(buf, sizeof(buf) - 1, "w");
@ -247,22 +248,26 @@ const char *btf_type_c_dump(const struct btf *btf)
return NULL;
}
opts.ctx = buf_file;
d = btf_dump__new(btf, NULL, &opts, btf_dump_printf);
d = btf_dump__new(btf, btf_dump_printf, buf_file, NULL);
if (libbpf_get_error(d)) {
fprintf(stderr, "Failed to create btf_dump instance: %ld\n", libbpf_get_error(d));
return NULL;
goto err_out;
}
for (i = 1; i < btf__type_cnt(btf); i++) {
err = btf_dump__dump_type(d, i);
if (err) {
fprintf(stderr, "Failed to dump type [%d]: %d\n", i, err);
return NULL;
goto err_out;
}
}
btf_dump__free(d);
fflush(buf_file);
fclose(buf_file);
return buf;
err_out:
btf_dump__free(d);
fclose(buf_file);
return NULL;
}
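For reference, the reworked btf_dump__new() takes the printf callback and its context as direct arguments; a hedged sketch:

static void my_printf(void *ctx, const char *fmt, va_list args)
{
	vfprintf((FILE *)ctx, fmt, args);	/* ctx is whatever was passed to btf_dump__new() */
}

struct btf_dump *d = btf_dump__new(btf, my_printf, stdout, NULL /* opts */);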

View File

@ -4,6 +4,7 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "testing_helpers.h"
static inline int bpf_flow_load(struct bpf_object **obj,
const char *path,
@ -18,7 +19,7 @@ static inline int bpf_flow_load(struct bpf_object **obj,
int prog_array_fd;
int ret, fd, i;
ret = bpf_prog_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
ret = bpf_prog_test_load(path, BPF_PROG_TYPE_FLOW_DISSECTOR, obj,
prog_fd);
if (ret)
return ret;

View File

@ -19,6 +19,7 @@
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#include "testing_helpers.h"
#include "bpf_rlimit.h"
#define CHECK(condition, tag, format...) ({ \
@ -66,8 +67,8 @@ int main(int argc, char **argv)
if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
return 1;
err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno))
goto cleanup_cgroup_env;
cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids");

View File

@ -594,6 +594,12 @@ static int do_test_single(struct bpf_align_test *test)
struct bpf_insn *prog = test->insns;
int prog_type = test->prog_type;
char bpf_vlog_copy[32768];
LIBBPF_OPTS(bpf_prog_load_opts, opts,
.prog_flags = BPF_F_STRICT_ALIGNMENT,
.log_buf = bpf_vlog,
.log_size = sizeof(bpf_vlog),
.log_level = 2,
);
const char *line_ptr;
int cur_line = -1;
int prog_len, i;
@ -601,9 +607,8 @@ static int do_test_single(struct bpf_align_test *test)
int ret;
prog_len = probe_filter_length(prog);
fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
prog, prog_len, BPF_F_STRICT_ALIGNMENT,
"GPL", 0, bpf_vlog, sizeof(bpf_vlog), 2);
fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
prog, prog_len, &opts);
if (fd_prog < 0 && test->result != REJECT) {
printf("Failed to load program.\n");
printf("%s", bpf_vlog);

View File

@ -699,14 +699,13 @@ static void test_bpf_percpu_hash_map(void)
char buf[64];
void *val;
val = malloc(8 * bpf_num_possible_cpus());
skel = bpf_iter_bpf_percpu_hash_map__open();
if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__open",
"skeleton open failed\n"))
return;
skel->rodata->num_cpus = bpf_num_possible_cpus();
val = malloc(8 * bpf_num_possible_cpus());
err = bpf_iter_bpf_percpu_hash_map__load(skel);
if (CHECK(!skel, "bpf_iter_bpf_percpu_hash_map__load",
@ -770,6 +769,7 @@ free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_percpu_hash_map__destroy(skel);
free(val);
}
static void test_bpf_array_map(void)
@ -870,14 +870,13 @@ static void test_bpf_percpu_array_map(void)
void *val;
int len;
val = malloc(8 * bpf_num_possible_cpus());
skel = bpf_iter_bpf_percpu_array_map__open();
if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__open",
"skeleton open failed\n"))
return;
skel->rodata->num_cpus = bpf_num_possible_cpus();
val = malloc(8 * bpf_num_possible_cpus());
err = bpf_iter_bpf_percpu_array_map__load(skel);
if (CHECK(!skel, "bpf_iter_bpf_percpu_array_map__load",
@ -933,6 +932,7 @@ free_link:
bpf_link__destroy(link);
out:
bpf_iter_bpf_percpu_array_map__destroy(skel);
free(val);
}
/* An iterator program deletes all local storage in a map. */

View File

@ -48,7 +48,7 @@ void serial_test_bpf_obj_id(void)
bzero(zeros, sizeof(zeros));
for (i = 0; i < nr_iters; i++) {
now = time(NULL);
err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT,
err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT,
&objs[i], &prog_fds[i]);
/* test_obj_id.o is a dumb prog. It should never fail
* to load.

View File

@ -3939,6 +3939,23 @@ static struct btf_raw_test raw_tests[] = {
.btf_load_err = true,
.err_str = "Invalid component_idx",
},
{
.descr = "type_tag test #1",
.raw_types = {
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [2] */
BTF_PTR_ENC(2), /* [3] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag"),
.map_type = BPF_MAP_TYPE_ARRAY,
.map_name = "tag_type_check_btf",
.key_size = sizeof(int),
.value_size = 4,
.key_type_id = 1,
.value_type_id = 1,
.max_entries = 1,
},
}; /* struct btf_raw_test raw_tests[] */
@ -4046,11 +4063,9 @@ static void *btf_raw_create(const struct btf_header *hdr,
next_str_idx < strs_cnt ? strs_idx[next_str_idx] : NULL;
done:
free(strs_idx);
if (err) {
if (raw_btf)
free(raw_btf);
if (strs_idx)
free(strs_idx);
free(raw_btf);
return NULL;
}
return raw_btf;
@ -6629,7 +6644,7 @@ struct btf_dedup_test {
struct btf_dedup_opts opts;
};
const struct btf_dedup_test dedup_tests[] = {
static struct btf_dedup_test dedup_tests[] = {
{
.descr = "dedup: unused strings filtering",
@ -6649,9 +6664,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0int\0long"),
},
.opts = {
.dont_resolve_fwds = false,
},
},
{
.descr = "dedup: strings deduplication",
@ -6674,9 +6686,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0int\0long int"),
},
.opts = {
.dont_resolve_fwds = false,
},
},
{
.descr = "dedup: struct example #1",
@ -6757,9 +6766,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0a\0b\0c\0d\0int\0float\0next\0s"),
},
.opts = {
.dont_resolve_fwds = false,
},
},
{
.descr = "dedup: struct <-> fwd resolution w/ hash collision",
@ -6802,8 +6808,7 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_STR_SEC("\0s\0x"),
},
.opts = {
.dont_resolve_fwds = false,
.dedup_table_size = 1, /* force hash collisions */
.force_collisions = true, /* force hash collisions */
},
},
{
@ -6849,8 +6854,7 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_STR_SEC("\0s\0x"),
},
.opts = {
.dont_resolve_fwds = false,
.dedup_table_size = 1, /* force hash collisions */
.force_collisions = true, /* force hash collisions */
},
},
{
@ -6874,15 +6878,16 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_RESTRICT_ENC(8), /* [11] restrict */
BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 18),
BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */
BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */
BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q"),
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R"),
},
.expect = {
.raw_types = {
@ -6903,18 +6908,16 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_RESTRICT_ENC(8), /* [11] restrict */
BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 18),
BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */
BTF_DECL_TAG_ENC(NAME_TBD, 13, -1), /* [15] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 13, 1), /* [16] decl_tag */
BTF_DECL_TAG_ENC(NAME_TBD, 7, -1), /* [17] decl_tag */
BTF_TYPE_TAG_ENC(NAME_TBD, 8), /* [18] type_tag */
BTF_END_RAW,
},
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q"),
},
.opts = {
.dont_resolve_fwds = false,
BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P\0Q\0R"),
},
},
{
@ -6967,9 +6970,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0int\0some other int\0float"),
},
.opts = {
.dont_resolve_fwds = false,
},
},
{
.descr = "dedup: enum fwd resolution",
@ -7011,9 +7011,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0e1\0e1_val\0e2\0e2_val"),
},
.opts = {
.dont_resolve_fwds = false,
},
},
{
.descr = "dedup: datasec and vars pass-through",
@ -7056,8 +7053,7 @@ const struct btf_dedup_test dedup_tests[] = {
BTF_STR_SEC("\0.bss\0t"),
},
.opts = {
.dont_resolve_fwds = false,
.dedup_table_size = 1
.force_collisions = true
},
},
{
@ -7101,9 +7097,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"),
},
.opts = {
.dont_resolve_fwds = false,
},
},
{
.descr = "dedup: func/func_param tags",
@ -7154,9 +7147,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"),
},
.opts = {
.dont_resolve_fwds = false,
},
},
{
.descr = "dedup: struct/struct_member tags",
@ -7202,9 +7192,6 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"),
},
.opts = {
.dont_resolve_fwds = false,
},
},
{
.descr = "dedup: typedef tags",
@ -7235,8 +7222,134 @@ const struct btf_dedup_test dedup_tests[] = {
},
BTF_STR_SEC("\0t\0tag1\0tag2\0tag3"),
},
.opts = {
.dont_resolve_fwds = false,
},
{
.descr = "dedup: btf_type_tag #1",
.input = {
.raw_types = {
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [5] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 5), /* [6] */
BTF_PTR_ENC(6), /* [7] */
/* ptr -> tag1 -> int */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [8] */
BTF_PTR_ENC(8), /* [9] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0tag2"),
},
.expect = {
.raw_types = {
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> tag1 -> int */
BTF_PTR_ENC(2), /* [5] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0tag2"),
},
},
{
.descr = "dedup: btf_type_tag #2",
.input = {
.raw_types = {
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> tag2 -> int */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
BTF_PTR_ENC(5), /* [6] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0tag2"),
},
.expect = {
.raw_types = {
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> tag2 -> int */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
BTF_PTR_ENC(5), /* [6] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0tag2"),
},
},
{
.descr = "dedup: btf_type_tag #3",
.input = {
.raw_types = {
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> tag1 -> tag2 -> int */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 5), /* [6] */
BTF_PTR_ENC(6), /* [7] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0tag2"),
},
.expect = {
.raw_types = {
/* ptr -> tag2 -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 2), /* [3] */
BTF_PTR_ENC(3), /* [4] */
/* ptr -> tag1 -> tag2 -> int */
BTF_TYPE_TAG_ENC(NAME_NTH(2), 1), /* [5] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 5), /* [6] */
BTF_PTR_ENC(6), /* [7] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1\0tag2"),
},
},
{
.descr = "dedup: btf_type_tag #4",
.input = {
.raw_types = {
/* ptr -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_PTR_ENC(2), /* [3] */
/* ptr -> tag1 -> long */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [4] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 4), /* [5] */
BTF_PTR_ENC(5), /* [6] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1"),
},
.expect = {
.raw_types = {
/* ptr -> tag1 -> int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 1), /* [2] */
BTF_PTR_ENC(2), /* [3] */
/* ptr -> tag1 -> long */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 64, 8), /* [4] */
BTF_TYPE_TAG_ENC(NAME_NTH(1), 4), /* [5] */
BTF_PTR_ENC(5), /* [6] */
BTF_END_RAW,
},
BTF_STR_SEC("\0tag1"),
},
},
@ -7257,6 +7370,7 @@ static int btf_type_size(const struct btf_type *t)
case BTF_KIND_TYPEDEF:
case BTF_KIND_FUNC:
case BTF_KIND_FLOAT:
case BTF_KIND_TYPE_TAG:
return base_size;
case BTF_KIND_INT:
return base_size + sizeof(__u32);
@ -7295,7 +7409,7 @@ static void dump_btf_strings(const char *strs, __u32 len)
static void do_test_dedup(unsigned int test_num)
{
const struct btf_dedup_test *test = &dedup_tests[test_num - 1];
struct btf_dedup_test *test = &dedup_tests[test_num - 1];
__u32 test_nr_types, expect_nr_types, test_btf_size, expect_btf_size;
const struct btf_header *test_hdr, *expect_hdr;
struct btf *test_btf = NULL, *expect_btf = NULL;
@ -7339,7 +7453,8 @@ static void do_test_dedup(unsigned int test_num)
goto done;
}
err = btf__dedup(test_btf, NULL, &test->opts);
test->opts.sz = sizeof(test->opts);
err = btf__dedup(test_btf, &test->opts);
if (CHECK(err, "btf_dedup failed errno:%d", err)) {
err = -1;
goto done;

View File

@ -92,7 +92,7 @@ struct s2 {\n\
int *f3;\n\
};\n\n", "c_dump");
err = btf__dedup(btf2, NULL, NULL);
err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
@ -186,7 +186,7 @@ static void test_split_fwd_resolve() {
"\t'f1' type_id=7 bits_offset=0\n"
"\t'f2' type_id=9 bits_offset=64");
err = btf__dedup(btf2, NULL, NULL);
err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;
@ -283,7 +283,7 @@ static void test_split_struct_duped() {
"[13] STRUCT 's3' size=8 vlen=1\n"
"\t'f1' type_id=12 bits_offset=0");
err = btf__dedup(btf2, NULL, NULL);
err = btf__dedup(btf2, NULL);
if (!ASSERT_OK(err, "btf_dedup"))
goto cleanup;

View File

@ -13,25 +13,23 @@ static struct btf_dump_test_case {
const char *name;
const char *file;
bool known_ptr_sz;
struct btf_dump_opts opts;
} btf_dump_test_cases[] = {
{"btf_dump: syntax", "btf_dump_test_case_syntax", true, {}},
{"btf_dump: ordering", "btf_dump_test_case_ordering", false, {}},
{"btf_dump: padding", "btf_dump_test_case_padding", true, {}},
{"btf_dump: packing", "btf_dump_test_case_packing", true, {}},
{"btf_dump: bitfields", "btf_dump_test_case_bitfields", true, {}},
{"btf_dump: multidim", "btf_dump_test_case_multidim", false, {}},
{"btf_dump: namespacing", "btf_dump_test_case_namespacing", false, {}},
{"btf_dump: syntax", "btf_dump_test_case_syntax", true},
{"btf_dump: ordering", "btf_dump_test_case_ordering", false},
{"btf_dump: padding", "btf_dump_test_case_padding", true},
{"btf_dump: packing", "btf_dump_test_case_packing", true},
{"btf_dump: bitfields", "btf_dump_test_case_bitfields", true},
{"btf_dump: multidim", "btf_dump_test_case_multidim", false},
{"btf_dump: namespacing", "btf_dump_test_case_namespacing", false},
};
static int btf_dump_all_types(const struct btf *btf,
const struct btf_dump_opts *opts)
static int btf_dump_all_types(const struct btf *btf, void *ctx)
{
size_t type_cnt = btf__type_cnt(btf);
struct btf_dump *d;
int err = 0, id;
d = btf_dump__new(btf, NULL, opts, btf_dump_printf);
d = btf_dump__new(btf, btf_dump_printf, ctx, NULL);
err = libbpf_get_error(d);
if (err)
return err;
@ -88,8 +86,7 @@ static int test_btf_dump_case(int n, struct btf_dump_test_case *t)
goto done;
}
t->opts.ctx = f;
err = btf_dump_all_types(btf, &t->opts);
err = btf_dump_all_types(btf, f);
fclose(f);
close(fd);
if (CHECK(err, "btf_dump", "failure during C dumping: %d\n", err)) {
@ -137,7 +134,6 @@ static void test_btf_dump_incremental(void)
{
struct btf *btf = NULL;
struct btf_dump *d = NULL;
struct btf_dump_opts opts;
int id, err, i;
dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz);
@ -146,8 +142,7 @@ static void test_btf_dump_incremental(void)
btf = btf__new_empty();
if (!ASSERT_OK_PTR(btf, "new_empty"))
goto err_out;
opts.ctx = dump_buf_file;
d = btf_dump__new(btf, NULL, &opts, btf_dump_printf);
d = btf_dump__new(btf, btf_dump_printf, dump_buf_file, NULL);
if (!ASSERT_OK(libbpf_get_error(d), "btf_dump__new"))
goto err_out;
@ -814,26 +809,28 @@ static void test_btf_datasec(struct btf *btf, struct btf_dump *d, char *str,
static void test_btf_dump_datasec_data(char *str)
{
struct btf *btf = btf__parse("xdping_kern.o", NULL);
struct btf_dump_opts opts = { .ctx = str };
struct btf *btf;
char license[4] = "GPL";
struct btf_dump *d;
btf = btf__parse("xdping_kern.o", NULL);
if (!ASSERT_OK_PTR(btf, "xdping_kern.o BTF not found"))
return;
d = btf_dump__new(btf, NULL, &opts, btf_dump_snprintf);
d = btf_dump__new(btf, btf_dump_snprintf, str, NULL);
if (!ASSERT_OK_PTR(d, "could not create BTF dump"))
return;
goto out;
test_btf_datasec(btf, d, str, "license",
"SEC(\"license\") char[4] _license = (char[4])['G','P','L',];",
license, sizeof(license));
out:
btf_dump__free(d);
btf__free(btf);
}
void test_btf_dump() {
char str[STRSIZE];
struct btf_dump_opts opts = { .ctx = str };
struct btf_dump *d;
struct btf *btf;
int i;
@ -853,7 +850,7 @@ void test_btf_dump() {
if (!ASSERT_OK_PTR(btf, "no kernel BTF found"))
return;
d = btf_dump__new(btf, NULL, &opts, btf_dump_snprintf);
d = btf_dump__new(btf, btf_dump_snprintf, str, NULL);
if (!ASSERT_OK_PTR(d, "could not create BTF dump"))
return;

View File

@ -13,7 +13,6 @@ static void btf_dump_printf(void *ctx, const char *fmt, va_list args)
}
void test_btf_split() {
struct btf_dump_opts opts;
struct btf_dump *d = NULL;
const struct btf_type *t;
struct btf *btf1, *btf2;
@ -68,8 +67,7 @@ void test_btf_split() {
dump_buf_file = open_memstream(&dump_buf, &dump_buf_sz);
if (!ASSERT_OK_PTR(dump_buf_file, "dump_memstream"))
return;
opts.ctx = dump_buf_file;
d = btf_dump__new(btf2, NULL, &opts, btf_dump_printf);
d = btf_dump__new(btf2, btf_dump_printf, dump_buf_file, NULL);
if (!ASSERT_OK_PTR(d, "btf_dump__new"))
goto cleanup;
for (i = 1; i < btf__type_cnt(btf2); i++) {

View File

@ -1,20 +1,50 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include "tag.skel.h"
#include "btf_decl_tag.skel.h"
void test_btf_tag(void)
/* struct btf_type_tag_test is referenced in btf_type_tag.skel.h */
struct btf_type_tag_test {
int **p;
};
#include "btf_type_tag.skel.h"
static void test_btf_decl_tag(void)
{
struct tag *skel;
struct btf_decl_tag *skel;
skel = tag__open_and_load();
if (!ASSERT_OK_PTR(skel, "btf_tag"))
skel = btf_decl_tag__open_and_load();
if (!ASSERT_OK_PTR(skel, "btf_decl_tag"))
return;
if (skel->rodata->skip_tests) {
printf("%s:SKIP: btf_tag attribute not supported", __func__);
printf("%s:SKIP: btf_decl_tag attribute not supported", __func__);
test__skip();
}
tag__destroy(skel);
btf_decl_tag__destroy(skel);
}
static void test_btf_type_tag(void)
{
struct btf_type_tag *skel;
skel = btf_type_tag__open_and_load();
if (!ASSERT_OK_PTR(skel, "btf_type_tag"))
return;
if (skel->rodata->skip_tests) {
printf("%s:SKIP: btf_type_tag attribute not supported", __func__);
test__skip();
}
btf_type_tag__destroy(skel);
}
void test_btf_tag(void)
{
if (test__start_subtest("btf_decl_tag"))
test_btf_decl_tag();
if (test__start_subtest("btf_type_tag"))
test_btf_type_tag();
}

View File

@ -297,6 +297,16 @@ static void gen_btf(struct btf *btf)
ASSERT_EQ(btf_decl_tag(t)->component_idx, 1, "tag_component_idx");
ASSERT_STREQ(btf_type_raw_dump(btf, 19),
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1", "raw_dump");
/* TYPE_TAG */
id = btf__add_type_tag(btf, "tag1", 1);
ASSERT_EQ(id, 20, "tag_id");
t = btf__type_by_id(btf, 20);
ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value");
ASSERT_EQ(btf_kind(t), BTF_KIND_TYPE_TAG, "tag_kind");
ASSERT_EQ(t->type, 1, "tag_type");
ASSERT_STREQ(btf_type_raw_dump(btf, 20),
"[20] TYPE_TAG 'tag1' type_id=1", "raw_dump");
}
static void test_btf_add()
@ -337,7 +347,8 @@ static void test_btf_add()
"[17] DATASEC 'datasec1' size=12 vlen=1\n"
"\ttype_id=1 offset=4 size=8",
"[18] DECL_TAG 'tag1' type_id=16 component_idx=-1",
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1");
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1",
"[20] TYPE_TAG 'tag1' type_id=1");
btf__free(btf);
}
@ -359,7 +370,7 @@ static void test_btf_add_btf()
gen_btf(btf2);
id = btf__add_btf(btf1, btf2);
if (!ASSERT_EQ(id, 20, "id"))
if (!ASSERT_EQ(id, 21, "id"))
goto cleanup;
VALIDATE_RAW_BTF(
@ -391,35 +402,37 @@ static void test_btf_add_btf()
"\ttype_id=1 offset=4 size=8",
"[18] DECL_TAG 'tag1' type_id=16 component_idx=-1",
"[19] DECL_TAG 'tag2' type_id=14 component_idx=1",
"[20] TYPE_TAG 'tag1' type_id=1",
/* types appended from the second BTF */
"[20] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[21] PTR '(anon)' type_id=20",
"[22] CONST '(anon)' type_id=24",
"[23] VOLATILE '(anon)' type_id=22",
"[24] RESTRICT '(anon)' type_id=23",
"[25] ARRAY '(anon)' type_id=21 index_type_id=20 nr_elems=10",
"[26] STRUCT 's1' size=8 vlen=2\n"
"\t'f1' type_id=20 bits_offset=0\n"
"\t'f2' type_id=20 bits_offset=32 bitfield_size=16",
"[27] UNION 'u1' size=8 vlen=1\n"
"\t'f1' type_id=20 bits_offset=0 bitfield_size=16",
"[28] ENUM 'e1' size=4 vlen=2\n"
"[21] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED",
"[22] PTR '(anon)' type_id=21",
"[23] CONST '(anon)' type_id=25",
"[24] VOLATILE '(anon)' type_id=23",
"[25] RESTRICT '(anon)' type_id=24",
"[26] ARRAY '(anon)' type_id=22 index_type_id=21 nr_elems=10",
"[27] STRUCT 's1' size=8 vlen=2\n"
"\t'f1' type_id=21 bits_offset=0\n"
"\t'f2' type_id=21 bits_offset=32 bitfield_size=16",
"[28] UNION 'u1' size=8 vlen=1\n"
"\t'f1' type_id=21 bits_offset=0 bitfield_size=16",
"[29] ENUM 'e1' size=4 vlen=2\n"
"\t'v1' val=1\n"
"\t'v2' val=2",
"[29] FWD 'struct_fwd' fwd_kind=struct",
"[30] FWD 'union_fwd' fwd_kind=union",
"[31] ENUM 'enum_fwd' size=4 vlen=0",
"[32] TYPEDEF 'typedef1' type_id=20",
"[33] FUNC 'func1' type_id=34 linkage=global",
"[34] FUNC_PROTO '(anon)' ret_type_id=20 vlen=2\n"
"\t'p1' type_id=20\n"
"\t'p2' type_id=21",
"[35] VAR 'var1' type_id=20, linkage=global-alloc",
"[36] DATASEC 'datasec1' size=12 vlen=1\n"
"\ttype_id=20 offset=4 size=8",
"[37] DECL_TAG 'tag1' type_id=35 component_idx=-1",
"[38] DECL_TAG 'tag2' type_id=33 component_idx=1");
"[30] FWD 'struct_fwd' fwd_kind=struct",
"[31] FWD 'union_fwd' fwd_kind=union",
"[32] ENUM 'enum_fwd' size=4 vlen=0",
"[33] TYPEDEF 'typedef1' type_id=21",
"[34] FUNC 'func1' type_id=35 linkage=global",
"[35] FUNC_PROTO '(anon)' ret_type_id=21 vlen=2\n"
"\t'p1' type_id=21\n"
"\t'p2' type_id=22",
"[36] VAR 'var1' type_id=21, linkage=global-alloc",
"[37] DATASEC 'datasec1' size=12 vlen=1\n"
"\ttype_id=21 offset=4 size=8",
"[38] DECL_TAG 'tag1' type_id=36 component_idx=-1",
"[39] DECL_TAG 'tag2' type_id=34 component_idx=1",
"[40] TYPE_TAG 'tag1' type_id=21");
cleanup:
btf__free(btf1);

View File

@ -16,7 +16,7 @@ static int prog_load(void)
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
}

View File

@ -66,7 +66,7 @@ static int prog_load_cnt(int verdict, int val)
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
int ret;
ret = bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
ret = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);

View File

@ -18,7 +18,7 @@ static int prog_load(int verdict)
};
size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
return bpf_load_program(BPF_PROG_TYPE_CGROUP_SKB,
return bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
prog, insns_cnt, "GPL", 0,
bpf_log_buf, BPF_LOG_BUF_SIZE);
}

View File

@@ -433,7 +433,7 @@ static int setup_type_id_case_local(struct core_reloc_test_case *test)
 static int setup_type_id_case_success(struct core_reloc_test_case *test) {
 	struct core_reloc_type_id_output *exp = (void *)test->output;
-	struct btf *targ_btf = btf__parse(test->btf_src_file, NULL);
+	struct btf *targ_btf;
 	int err;
 	err = setup_type_id_case_local(test);


@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021, Oracle and/or its affiliates. */
+#include <test_progs.h>
+/* Test that verifies exception handling is working. fork()
+ * triggers the task_newtask tracepoint; the new task's task_works
+ * pointer is NULL, so dereferencing task->task_works->func would
+ * normally fault (were task_works non-NULL, func would be too).
+ *
+ * To verify exception handling we therefore want to see both a NULL
+ * task_works and a NULL task_works->func: that combination tells us
+ * the exception handler ran when we dereferenced task->task_works
+ * and zeroed the destination register instead of faulting.
+ */
+#include "exhandler_kern.skel.h"
+void test_exhandler(void)
+{
+	int err = 0, duration = 0, status;
+	struct exhandler_kern *skel;
+	pid_t cpid;
+	skel = exhandler_kern__open_and_load();
+	if (CHECK(!skel, "skel_load", "skeleton failed: %d\n", err))
+		goto cleanup;
+	skel->bss->test_pid = getpid();
+	err = exhandler_kern__attach(skel);
+	if (!ASSERT_OK(err, "attach"))
+		goto cleanup;
+	cpid = fork();
+	if (!ASSERT_GT(cpid, -1, "fork"))
+		goto cleanup;
+	if (cpid == 0)
+		_exit(0);
+	waitpid(cpid, &status, 0);
+	ASSERT_NEQ(skel->bss->exception_triggered, 0, "verify exceptions occurred");
+cleanup:
+	exhandler_kern__destroy(skel);
+}

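The skeleton's BPF half is not shown in this diff; in outline it looks like the sketch below (identifiers are illustrative, not the actual exhandler_kern.c):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

unsigned int exception_triggered;
int test_pid;

SEC("tp_btf/task_newtask")
int BPF_PROG(handle_task_newtask, struct task_struct *task, u64 clone_flags)
{
	int pid = bpf_get_current_pid_tgid() >> 32;
	struct callback_head *work;
	void *func;

	if (test_pid != pid)
		return 0;

	/* the new task's task_works is NULL, so the second load faults;
	 * the exception handler zeroes the destination register, leaving
	 * both work and func NULL instead of aborting the program */
	work = task->task_works;
	func = work->func;
	if (!work && !func)
		exception_triggered++;
	return 0;
}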

@@ -65,7 +65,7 @@ static void test_fexit_bpf2bpf_common(const char *obj_file,
 	int err, tgt_fd, i;
 	struct btf *btf;
-	err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
+	err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
 			    &tgt_obj, &tgt_fd);
 	if (!ASSERT_OK(err, "tgt_prog_load"))
 		return;
@@ -224,7 +224,7 @@ static int test_second_attach(struct bpf_object *obj)
 	if (CHECK(!prog, "find_prog", "prog %s not found\n", prog_name))
 		return -ENOENT;
-	err = bpf_prog_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC,
+	err = bpf_prog_test_load(tgt_obj_file, BPF_PROG_TYPE_UNSPEC,
 			    &tgt_obj, &tgt_fd);
 	if (CHECK(err, "second_prog_load", "file %s err %d errno %d\n",
 		  tgt_obj_file, err, errno))
@@ -274,7 +274,7 @@ static void test_fmod_ret_freplace(void)
 	__u32 duration = 0;
 	int err, pkt_fd, attach_prog_fd;
-	err = bpf_prog_load(tgt_name, BPF_PROG_TYPE_UNSPEC,
+	err = bpf_prog_test_load(tgt_name, BPF_PROG_TYPE_UNSPEC,
 			    &pkt_obj, &pkt_fd);
 	/* the target prog should load fine */
 	if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",
@@ -341,7 +341,7 @@ static void test_obj_load_failure_common(const char *obj_file,
 	int err, pkt_fd;
 	__u32 duration = 0;
-	err = bpf_prog_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
+	err = bpf_prog_test_load(target_obj_file, BPF_PROG_TYPE_UNSPEC,
 			    &pkt_obj, &pkt_fd);
 	/* the target prog should load fine */
 	if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n",


@@ -20,34 +20,33 @@ void test_fexit_stress(void)
 		BPF_EXIT_INSN(),
 	};
-	struct bpf_load_program_attr load_attr = {
-		.prog_type = BPF_PROG_TYPE_TRACING,
-		.license = "GPL",
-		.insns = trace_program,
-		.insns_cnt = sizeof(trace_program) / sizeof(struct bpf_insn),
+	LIBBPF_OPTS(bpf_prog_load_opts, trace_opts,
 		.expected_attach_type = BPF_TRACE_FEXIT,
-	};
+		.log_buf = error,
+		.log_size = sizeof(error),
+	);
 	const struct bpf_insn skb_program[] = {
 		BPF_MOV64_IMM(BPF_REG_0, 0),
 		BPF_EXIT_INSN(),
 	};
-	struct bpf_load_program_attr skb_load_attr = {
-		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
-		.license = "GPL",
-		.insns = skb_program,
-		.insns_cnt = sizeof(skb_program) / sizeof(struct bpf_insn),
-	};
+	LIBBPF_OPTS(bpf_prog_load_opts, skb_opts,
+		.log_buf = error,
+		.log_size = sizeof(error),
+	);
 	err = libbpf_find_vmlinux_btf_id("bpf_fentry_test1",
-					 load_attr.expected_attach_type);
+					 trace_opts.expected_attach_type);
 	if (CHECK(err <= 0, "find_vmlinux_btf_id", "failed: %d\n", err))
 		goto out;
-	load_attr.attach_btf_id = err;
+	trace_opts.attach_btf_id = err;
 	for (i = 0; i < CNT; i++) {
-		fexit_fd[i] = bpf_load_program_xattr(&load_attr, error, sizeof(error));
+		fexit_fd[i] = bpf_prog_load(BPF_PROG_TYPE_TRACING, NULL, "GPL",
+					    trace_program,
+					    sizeof(trace_program) / sizeof(struct bpf_insn),
+					    &trace_opts);
 		if (CHECK(fexit_fd[i] < 0, "fexit loaded",
 			  "failed: %d errno %d\n", fexit_fd[i], errno))
 			goto out;
@@ -57,7 +56,9 @@ void test_fexit_stress(void)
 		goto out;
 	}
-	filter_fd = bpf_load_program_xattr(&skb_load_attr, error, sizeof(error));
+	filter_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
+				  skb_program, sizeof(skb_program) / sizeof(struct bpf_insn),
+				  &skb_opts);
 	if (CHECK(filter_fd < 0, "test_program_loaded", "failed: %d errno %d\n",
 		  filter_fd, errno))
 		goto out;

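The fexit_stress conversion above is the general recipe for moving off the removed bpf_load_program_xattr(): program type, name, license, and instructions become direct arguments of the unified bpf_prog_load(), and everything else moves into bpf_prog_load_opts. A condensed, self-contained sketch of the same pattern (the ret-0 filter mirrors skb_program above; BPF_MOV64_IMM/BPF_EXIT_INSN come from the tools-tree filter.h helpers, not the uapi header):

#include <linux/filter.h>	/* tools/include: BPF_MOV64_IMM, BPF_EXIT_INSN */
#include <bpf/bpf.h>

static char log[4096];

static int load_ret0_filter(void)
{
	const struct bpf_insn insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log,
		.log_size = sizeof(log),
	);

	/* returns a prog fd on success, a negative error on failure */
	return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
			     insns, sizeof(insns) / sizeof(insns[0]), &opts);
}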

@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021 Facebook */
+#include <test_progs.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include "find_vma.skel.h"
+#include "find_vma_fail1.skel.h"
+#include "find_vma_fail2.skel.h"
+static void test_and_reset_skel(struct find_vma *skel, int expected_find_zero_ret)
+{
+	ASSERT_EQ(skel->bss->found_vm_exec, 1, "found_vm_exec");
+	ASSERT_EQ(skel->data->find_addr_ret, 0, "find_addr_ret");
+	ASSERT_EQ(skel->data->find_zero_ret, expected_find_zero_ret, "find_zero_ret");
+	ASSERT_OK_PTR(strstr(skel->bss->d_iname, "test_progs"), "find_test_progs");
+	skel->bss->found_vm_exec = 0;
+	skel->data->find_addr_ret = -1;
+	skel->data->find_zero_ret = -1;
+	skel->bss->d_iname[0] = 0;
+}
+static int open_pe(void)
+{
+	struct perf_event_attr attr = {0};
+	int pfd;
+	/* create perf event */
+	attr.size = sizeof(attr);
+	attr.type = PERF_TYPE_HARDWARE;
+	attr.config = PERF_COUNT_HW_CPU_CYCLES;
+	attr.freq = 1;
+	attr.sample_freq = 4000;
+	pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
+	return pfd >= 0 ? pfd : -errno;
+}
+static void test_find_vma_pe(struct find_vma *skel)
+{
+	struct bpf_link *link = NULL;
+	volatile int j = 0;
+	int pfd, i;
+	pfd = open_pe();
+	if (pfd < 0) {
+		if (pfd == -ENOENT || pfd == -EOPNOTSUPP) {
+			printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", __func__);
+			test__skip();
+			goto cleanup;
+		}
+		if (!ASSERT_GE(pfd, 0, "perf_event_open"))
+			goto cleanup;
+	}
+	link = bpf_program__attach_perf_event(skel->progs.handle_pe, pfd);
+	if (!ASSERT_OK_PTR(link, "attach_perf_event"))
+		goto cleanup;
+	for (i = 0; i < 1000000; ++i)
+		++j;
+	test_and_reset_skel(skel, -EBUSY /* in nmi, irq_work is busy */);
+cleanup:
+	bpf_link__destroy(link);
+	close(pfd);
+}
+static void test_find_vma_kprobe(struct find_vma *skel)
+{
+	int err;
+	err = find_vma__attach(skel);
+	if (!ASSERT_OK(err, "find_vma__attach"))
+		return;
+	getpgid(skel->bss->target_pid);
+	test_and_reset_skel(skel, -ENOENT /* could not find vma for ptr 0 */);
+}
+static void test_illegal_write_vma(void)
+{
+	struct find_vma_fail1 *skel;
+	skel = find_vma_fail1__open_and_load();
+	if (!ASSERT_ERR_PTR(skel, "find_vma_fail1__open_and_load"))
+		find_vma_fail1__destroy(skel);
+}
+static void test_illegal_write_task(void)
+{
+	struct find_vma_fail2 *skel;
+	skel = find_vma_fail2__open_and_load();
+	if (!ASSERT_ERR_PTR(skel, "find_vma_fail2__open_and_load"))
+		find_vma_fail2__destroy(skel);
+}
+void serial_test_find_vma(void)
+{
+	struct find_vma *skel;
+	skel = find_vma__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "find_vma__open_and_load"))
+		return;
+	skel->bss->target_pid = getpid();
+	skel->bss->addr = (__u64)(uintptr_t)test_find_vma_pe;
+	test_find_vma_pe(skel);
+	usleep(100000); /* allow the irq_work to finish */
+	test_find_vma_kprobe(skel);
+	find_vma__destroy(skel);
+	test_illegal_write_vma();
+	test_illegal_write_task();
+}

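For context on what this test drives: on the BPF side, the new bpf_find_vma(task, addr, callback_fn, callback_ctx, flags) helper looks up the VMA covering addr in task's address space and invokes the callback on it under the mmap lock, returning -ENOENT when no VMA matches (the find_zero case) or -EBUSY when the lock cannot be taken, as in the NMI/perf-event path above. A rough sketch of the kprobe-path program (identifiers are illustrative, not the in-tree progs/find_vma.c):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

#define VM_EXEC	0x00000004	/* macro, not emitted into vmlinux.h */

int target_pid;
__u64 addr;
long find_addr_ret = -1;
int found_vm_exec;

/* invoked with the vma covering 'addr', under the mmap lock */
static long check_vma(struct task_struct *task, struct vm_area_struct *vma,
		      void *data)
{
	if (vma->vm_flags & VM_EXEC)
		found_vm_exec = 1;
	return 0;
}

SEC("kprobe/__x64_sys_getpgid")
int handle_getpgid(void *ctx)
{
	struct task_struct *task = bpf_get_current_task_btf();

	if (task->pid == target_pid)
		find_addr_ret = bpf_find_vma(task, addr, check_vma, NULL, 0);
	return 0;
}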

@@ -30,7 +30,7 @@ void serial_test_flow_dissector_load_bytes(void)
 	/* make sure bpf_skb_load_bytes is not allowed from skb-less context
 	 */
-	fd = bpf_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,
+	fd = bpf_test_load_program(BPF_PROG_TYPE_FLOW_DISSECTOR, prog,
 			      ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
 	CHECK(fd < 0,
 	      "flow_dissector-bpf_skb_load_bytes-load",


@@ -47,9 +47,9 @@ static int load_prog(enum bpf_prog_type type)
 	};
 	int fd;
-	fd = bpf_load_program(type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
+	fd = bpf_test_load_program(type, prog, ARRAY_SIZE(prog), "GPL", 0, NULL, 0);
 	if (CHECK_FAIL(fd < 0))
-		perror("bpf_load_program");
+		perror("bpf_test_load_program");
 	return fd;
 }


@@ -85,7 +85,6 @@ void test_get_stack_raw_tp(void)
 	const char *file_err = "./test_get_stack_rawtp_err.o";
 	const char *prog_name = "raw_tracepoint/sys_enter";
 	int i, err, prog_fd, exp_cnt = MAX_CNT_RAWTP;
-	struct perf_buffer_opts pb_opts = {};
 	struct perf_buffer *pb = NULL;
 	struct bpf_link *link = NULL;
 	struct timespec tv = {0, 10};
@@ -94,11 +93,11 @@ void test_get_stack_raw_tp(void)
 	struct bpf_map *map;
 	cpu_set_t cpu_set;
-	err = bpf_prog_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+	err = bpf_prog_test_load(file_err, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
 	if (CHECK(err >= 0, "prog_load raw tp", "err %d errno %d\n", err, errno))
 		return;
-	err = bpf_prog_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
+	err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &obj, &prog_fd);
 	if (CHECK(err, "prog_load raw tp", "err %d errno %d\n", err, errno))
 		return;
@@ -124,8 +123,8 @@ void test_get_stack_raw_tp(void)
 	if (!ASSERT_OK_PTR(link, "attach_raw_tp"))
 		goto close_prog;
-	pb_opts.sample_cb = get_stack_print_output;
-	pb = perf_buffer__new(bpf_map__fd(map), 8, &pb_opts);
+	pb = perf_buffer__new(bpf_map__fd(map), 8, get_stack_print_output,
+			      NULL, NULL, NULL);
 	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
 		goto close_prog;

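The perf_buffer__new() rewrites in this and the kfree_skb hunk below track libbpf's v0.6 API future-proofing: the sample callback, lost-sample callback, and ctx pointer moved out of struct perf_buffer_opts into explicit parameters, and the trailing opts pointer may now simply be NULL. The new shape, for reference:

struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
				     perf_buffer_sample_fn sample_cb,
				     perf_buffer_lost_fn lost_cb, void *ctx,
				     const struct perf_buffer_opts *opts);

/* e.g.: eight pages per CPU, sample callback only, no ctx, default opts */
pb = perf_buffer__new(bpf_map__fd(map), 8, get_stack_print_output,
		      NULL, NULL, NULL);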

@@ -136,7 +136,7 @@ void test_global_data(void)
 	struct bpf_object *obj;
 	int err, prog_fd;
-	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+	err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
 	if (CHECK(err, "load program", "error %d loading %s\n", err, file))
 		return;


@@ -44,7 +44,7 @@ void test_global_func_args(void)
 	struct bpf_object *obj;
 	int err, prog_fd;
-	err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
+	err = bpf_prog_test_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd);
 	if (CHECK(err, "load program", "error %d loading %s\n", err, file))
 		return;


@@ -66,7 +66,6 @@ void serial_test_kfree_skb(void)
 	struct bpf_map *perf_buf_map, *global_data;
 	struct bpf_program *prog, *fentry, *fexit;
 	struct bpf_object *obj, *obj2 = NULL;
-	struct perf_buffer_opts pb_opts = {};
 	struct perf_buffer *pb = NULL;
 	int err, kfree_skb_fd;
 	bool passed = false;
@@ -74,7 +73,7 @@ void serial_test_kfree_skb(void)
 	const int zero = 0;
 	bool test_ok[2];
-	err = bpf_prog_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
+	err = bpf_prog_test_load("./test_pkt_access.o", BPF_PROG_TYPE_SCHED_CLS,
 				 &obj, &tattr.prog_fd);
 	if (CHECK(err, "prog_load sched cls", "err %d errno %d\n", err, errno))
 		return;
@@ -112,9 +111,8 @@ void serial_test_kfree_skb(void)
 		goto close_prog;
 	/* set up perf buffer */
-	pb_opts.sample_cb = on_sample;
-	pb_opts.ctx = &passed;
-	pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1, &pb_opts);
+	pb = perf_buffer__new(bpf_map__fd(perf_buf_map), 1,
+			      on_sample, NULL, &passed, NULL);
 	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
 		goto close_prog;


@@ -30,7 +30,7 @@ static void test_l4lb(const char *file)
 	char buf[128];
 	u32 *magic = (u32 *)buf;
-	err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
+	err = bpf_prog_test_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd);
 	if (CHECK_FAIL(err))
 		return;


@@ -27,7 +27,7 @@ void test_load_bytes_relative(void)
 	if (CHECK_FAIL(server_fd < 0))
 		goto close_cgroup_fd;
-	err = bpf_prog_load("./load_bytes_relative.o", BPF_PROG_TYPE_CGROUP_SKB,
+	err = bpf_prog_test_load("./load_bytes_relative.o", BPF_PROG_TYPE_CGROUP_SKB,
 				 &obj, &prog_fd);
 	if (CHECK_FAIL(err))
 		goto close_server_fd;

Some files were not shown because too many files have changed in this diff.