Daniel Borkmann says:

====================
pull-request: bpf-next 2022-03-04

We've added 32 non-merge commits during the last 14 day(s) which contain
a total of 59 files changed, 1038 insertions(+), 473 deletions(-).

The main changes are:

1) Optimize BPF stackmap's build_id retrieval by caching last valid build_id,
   as consecutive stack frames are likely to be in the same VMA and therefore
   have the same build id, from Hao Luo.

2) Several improvements to the arm64 BPF JIT, namely support for JITing
   the atomic[64]_fetch_add, atomic[64]_[fetch_]{and,or,xor} and
   atomic[64]_{xchg,cmpxchg} operations (a short BPF C usage sketch follows
   this list). Also fix the BTF line info dump for JITed programs, from Hou Tao.

3) Optimize generic BPF map batch deletion by enforcing the synchronize_rcu()
   barrier only once, upon return to user space, from Eric Dumazet.

4) For the kernel build, parse DWARF and generate BTF through pahole with
   multithreading enabled, from Kui-Feng Lee.

5) BPF verifier usability improvements by making the log output more concise
   and replacing the 'inv' register type name with 'scalar', from Mykola Lysenko.

6) Two follow-up fixes for BPF prog JIT pack allocator, from Song Liu.

7) Add a new Kconfig to allow for loading kernel modules with non-matching
   BTF type info; their BTF info is then removed on load, from Connor O'Brien.

8) Remove reallocarray() usage from bpftool and switch to libbpf_reallocarray()
   in order to fix compilation errors for older glibc, from Mauricio Vásquez.

9) Fix libbpf to error on conflicting name in BTF when type declaration
   appears before the definition, from Xu Kuohai.

10) Fix issue in BPF preload for the in-kernel light skeleton where loaded BPF
    program fds prevent the init process from setting up fds 0-2, from Yucong Sun.

11) Fix libbpf reuse of pinned perf RB map when max_entries is auto-determined
    by libbpf, from Stijn Tintel.

12) Several cleanups for libbpf and a fix to enforce perf RB map #pages to be
    non-zero, from Yuntao Wang.
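
As a brief illustration of 2), the following BPF C sketch (not part of this
series; assumes clang with -mcpu=v3 so the __sync_* builtins lower to
BPF_ATOMIC instructions) exercises the operations the arm64 JIT can now
translate natively:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  __u64 counter;

  SEC("raw_tp/sched_switch")
  int atomics_example(void *ctx)
  {
          __u64 old;

          __sync_fetch_and_add(&counter, 1);                  /* atomic[64]_add      */
          old  = __sync_fetch_and_or(&counter, 0x10);         /* atomic[64]_fetch_or */
          old += __sync_lock_test_and_set(&counter, 0);       /* atomic[64]_xchg     */
          old += __sync_val_compare_and_swap(&counter, 0, 1); /* atomic[64]_cmpxchg  */
          return old != 0;
  }

  char LICENSE[] SEC("license") = "GPL";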

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (32 commits)
  bpf: Small BPF verifier log improvements
  libbpf: Add a check to ensure that page_cnt is non-zero
  bpf, x86: Set header->size properly before freeing it
  x86: Disable HAVE_ARCH_HUGE_VMALLOC on 32-bit x86
  bpf, test_run: Fix overflow in XDP frags bpf_test_finish
  selftests/bpf: Update btf_dump case for conflicting names
  libbpf: Skip forward declaration when counting duplicated type names
  bpf: Add some description about BPF_JIT_ALWAYS_ON in Kconfig
  bpf, docs: Add a missing colon in verifier.rst
  bpf: Cache the last valid build_id
  libbpf: Fix BPF_MAP_TYPE_PERF_EVENT_ARRAY auto-pinning
  bpf, selftests: Use raw_tp program for atomic test
  bpf, arm64: Support more atomic operations
  bpftool: Remove redundant slashes
  bpf: Add config to allow loading modules with BTF mismatches
  bpf, arm64: Feed byte-offset into bpf line info
  bpf, arm64: Call build_prologue() first in first JIT pass
  bpf: Fix issue with bpf preload module taking over stdout/stdin of kernel.
  bpftool: Bpf skeletons assert type sizes
  bpf: Cleanup comments
  ...
====================

Link: https://lore.kernel.org/r/20220304164313.31675-1-daniel@iogearbox.net
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Jakub Kicinski 2022-03-04 19:28:17 -08:00
commit 6646dc241d
59 changed files with 1039 additions and 474 deletions

View File

@ -329,7 +329,7 @@ Program with unreachable instructions::
BPF_EXIT_INSN(),
};
Error:
Error::
unreachable insn 1

View File

@ -34,18 +34,6 @@
*/
#define BREAK_INSTR_SIZE AARCH64_INSN_SIZE
/*
* BRK instruction encoding
* The #imm16 value should be placed at bits[20:5] within BRK ins
*/
#define AARCH64_BREAK_MON 0xd4200000
/*
* BRK instruction for provoking a fault on purpose
* Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
*/
#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
#define AARCH64_BREAK_KGDB_DYN_DBG \
(AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))

View File

@ -3,7 +3,21 @@
#ifndef __ASM_INSN_DEF_H
#define __ASM_INSN_DEF_H
#include <asm/brk-imm.h>
/* A64 instructions are always 32 bits. */
#define AARCH64_INSN_SIZE 4
/*
* BRK instruction encoding
* The #imm16 value should be placed at bits[20:5] within BRK ins
*/
#define AARCH64_BREAK_MON 0xd4200000
/*
* BRK instruction for provoking a fault on purpose
* Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
*/
#define AARCH64_BREAK_FAULT (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
#endif /* __ASM_INSN_DEF_H */

View File

@ -205,7 +205,9 @@ enum aarch64_insn_ldst_type {
AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX,
AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX,
AARCH64_INSN_LDST_LOAD_EX,
AARCH64_INSN_LDST_LOAD_ACQ_EX,
AARCH64_INSN_LDST_STORE_EX,
AARCH64_INSN_LDST_STORE_REL_EX,
};
enum aarch64_insn_adsb_type {
@ -280,6 +282,36 @@ enum aarch64_insn_adr_type {
AARCH64_INSN_ADR_TYPE_ADR,
};
enum aarch64_insn_mem_atomic_op {
AARCH64_INSN_MEM_ATOMIC_ADD,
AARCH64_INSN_MEM_ATOMIC_CLR,
AARCH64_INSN_MEM_ATOMIC_EOR,
AARCH64_INSN_MEM_ATOMIC_SET,
AARCH64_INSN_MEM_ATOMIC_SWP,
};
enum aarch64_insn_mem_order_type {
AARCH64_INSN_MEM_ORDER_NONE,
AARCH64_INSN_MEM_ORDER_ACQ,
AARCH64_INSN_MEM_ORDER_REL,
AARCH64_INSN_MEM_ORDER_ACQREL,
};
enum aarch64_insn_mb_type {
AARCH64_INSN_MB_SY,
AARCH64_INSN_MB_ST,
AARCH64_INSN_MB_LD,
AARCH64_INSN_MB_ISH,
AARCH64_INSN_MB_ISHST,
AARCH64_INSN_MB_ISHLD,
AARCH64_INSN_MB_NSH,
AARCH64_INSN_MB_NSHST,
AARCH64_INSN_MB_NSHLD,
AARCH64_INSN_MB_OSH,
AARCH64_INSN_MB_OSHST,
AARCH64_INSN_MB_OSHLD,
};
#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
{ \
@ -303,6 +335,11 @@ __AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400)
__AARCH64_INSN_FUNCS(load_post, 0x3FE00C00, 0x38400400)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldclr, 0x3F20FC00, 0x38201000)
__AARCH64_INSN_FUNCS(ldeor, 0x3F20FC00, 0x38202000)
__AARCH64_INSN_FUNCS(ldset, 0x3F20FC00, 0x38203000)
__AARCH64_INSN_FUNCS(swp, 0x3F20FC00, 0x38208000)
__AARCH64_INSN_FUNCS(cas, 0x3FA07C00, 0x08A07C00)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
@ -474,13 +511,6 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
enum aarch64_insn_register state,
enum aarch64_insn_size_type size,
enum aarch64_insn_ldst_type type);
u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size);
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
enum aarch64_insn_register src,
int imm, enum aarch64_insn_variant variant,
@ -541,6 +571,42 @@ u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
enum aarch64_insn_prfm_type type,
enum aarch64_insn_prfm_target target,
enum aarch64_insn_prfm_policy policy);
#ifdef CONFIG_ARM64_LSE_ATOMICS
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_atomic_op op,
enum aarch64_insn_mem_order_type order);
u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_order_type order);
#else
static inline
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_atomic_op op,
enum aarch64_insn_mem_order_type order)
{
return AARCH64_BREAK_FAULT;
}
static inline
u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_order_type order)
{
return AARCH64_BREAK_FAULT;
}
#endif
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
s32 aarch64_get_branch_offset(u32 insn);
u32 aarch64_set_branch_offset(u32 insn, s32 offset);

View File

@ -578,10 +578,16 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
switch (type) {
case AARCH64_INSN_LDST_LOAD_EX:
case AARCH64_INSN_LDST_LOAD_ACQ_EX:
insn = aarch64_insn_get_load_ex_value();
if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
insn |= BIT(15);
break;
case AARCH64_INSN_LDST_STORE_EX:
case AARCH64_INSN_LDST_STORE_REL_EX:
insn = aarch64_insn_get_store_ex_value();
if (type == AARCH64_INSN_LDST_STORE_REL_EX)
insn |= BIT(15);
break;
default:
pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
@ -603,12 +609,65 @@ u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
state);
}
u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size)
#ifdef CONFIG_ARM64_LSE_ATOMICS
static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
u32 insn)
{
u32 insn = aarch64_insn_get_ldadd_value();
u32 order;
switch (type) {
case AARCH64_INSN_MEM_ORDER_NONE:
order = 0;
break;
case AARCH64_INSN_MEM_ORDER_ACQ:
order = 2;
break;
case AARCH64_INSN_MEM_ORDER_REL:
order = 1;
break;
case AARCH64_INSN_MEM_ORDER_ACQREL:
order = 3;
break;
default:
pr_err("%s: unknown mem order %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
insn &= ~GENMASK(23, 22);
insn |= order << 22;
return insn;
}
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_atomic_op op,
enum aarch64_insn_mem_order_type order)
{
u32 insn;
switch (op) {
case AARCH64_INSN_MEM_ATOMIC_ADD:
insn = aarch64_insn_get_ldadd_value();
break;
case AARCH64_INSN_MEM_ATOMIC_CLR:
insn = aarch64_insn_get_ldclr_value();
break;
case AARCH64_INSN_MEM_ATOMIC_EOR:
insn = aarch64_insn_get_ldeor_value();
break;
case AARCH64_INSN_MEM_ATOMIC_SET:
insn = aarch64_insn_get_ldset_value();
break;
case AARCH64_INSN_MEM_ATOMIC_SWP:
insn = aarch64_insn_get_swp_value();
break;
default:
pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
return AARCH64_BREAK_FAULT;
}
switch (size) {
case AARCH64_INSN_SIZE_32:
@ -621,6 +680,8 @@ u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
insn = aarch64_insn_encode_ldst_size(size, insn);
insn = aarch64_insn_encode_ldst_order(order, insn);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
result);
@ -631,18 +692,69 @@ u32 aarch64_insn_gen_ldadd(enum aarch64_insn_register result,
value);
}
u32 aarch64_insn_gen_stadd(enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size)
static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
u32 insn)
{
/*
* STADD is simply encoded as an alias for LDADD with XZR as
* the destination register.
*/
return aarch64_insn_gen_ldadd(AARCH64_INSN_REG_ZR, address,
value, size);
u32 order;
switch (type) {
case AARCH64_INSN_MEM_ORDER_NONE:
order = 0;
break;
case AARCH64_INSN_MEM_ORDER_ACQ:
order = BIT(22);
break;
case AARCH64_INSN_MEM_ORDER_REL:
order = BIT(15);
break;
case AARCH64_INSN_MEM_ORDER_ACQREL:
order = BIT(15) | BIT(22);
break;
default:
pr_err("%s: unknown mem order %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
insn &= ~(BIT(15) | BIT(22));
insn |= order;
return insn;
}
u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
enum aarch64_insn_register address,
enum aarch64_insn_register value,
enum aarch64_insn_size_type size,
enum aarch64_insn_mem_order_type order)
{
u32 insn;
switch (size) {
case AARCH64_INSN_SIZE_32:
case AARCH64_INSN_SIZE_64:
break;
default:
pr_err("%s: unimplemented size encoding %d\n", __func__, size);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_get_cas_value();
insn = aarch64_insn_encode_ldst_size(size, insn);
insn = aarch64_insn_encode_cas_order(order, insn);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
result);
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
address);
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
value);
}
#endif
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
enum aarch64_insn_prfm_target target,
enum aarch64_insn_prfm_policy policy,
@ -1379,7 +1491,7 @@ static u32 aarch64_encode_immediate(u64 imm,
* Compute the rotation to get a continuous set of
* ones, with the first bit set at position 0
*/
ror = fls(~imm);
ror = fls64(~imm);
}
/*
@ -1456,3 +1568,48 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
{
u32 opt;
u32 insn;
switch (type) {
case AARCH64_INSN_MB_SY:
opt = 0xf;
break;
case AARCH64_INSN_MB_ST:
opt = 0xe;
break;
case AARCH64_INSN_MB_LD:
opt = 0xd;
break;
case AARCH64_INSN_MB_ISH:
opt = 0xb;
break;
case AARCH64_INSN_MB_ISHST:
opt = 0xa;
break;
case AARCH64_INSN_MB_ISHLD:
opt = 0x9;
break;
case AARCH64_INSN_MB_NSH:
opt = 0x7;
break;
case AARCH64_INSN_MB_NSHST:
opt = 0x6;
break;
case AARCH64_INSN_MB_NSHLD:
opt = 0x5;
break;
default:
pr_err("%s: unknown dmb type %d\n", __func__, type);
return AARCH64_BREAK_FAULT;
}
insn = aarch64_insn_get_dmb_value();
insn &= ~GENMASK(11, 8);
insn |= (opt << 8);
return insn;
}

View File

@ -88,10 +88,42 @@
/* [Rn] = Rt; (atomic) Rs = [state] */
#define A64_STXR(sf, Rt, Rn, Rs) \
A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
/* [Rn] = Rt (store release); (atomic) Rs = [state] */
#define A64_STLXR(sf, Rt, Rn, Rs) \
aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
AARCH64_INSN_LDST_STORE_REL_EX)
/* LSE atomics */
#define A64_STADD(sf, Rn, Rs) \
aarch64_insn_gen_stadd(Rn, Rs, A64_SIZE(sf))
/*
* LSE atomics
*
* ST{ADD,CLR,SET,EOR} is simply encoded as an alias for
 * LD{ADD,CLR,SET,EOR} with XZR as the destination register.
*/
#define A64_ST_OP(sf, Rn, Rs, op) \
aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \
A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
AARCH64_INSN_MEM_ORDER_NONE)
/* [Rn] <op>= Rs */
#define A64_STADD(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, ADD)
#define A64_STCLR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, CLR)
#define A64_STEOR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, EOR)
#define A64_STSET(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, SET)
#define A64_LD_OP_AL(sf, Rt, Rn, Rs, op) \
aarch64_insn_gen_atomic_ld_op(Rt, Rn, Rs, \
A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
AARCH64_INSN_MEM_ORDER_ACQREL)
/* Rt = [Rn] (load acquire); [Rn] <op>= Rs (store release) */
#define A64_LDADDAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, ADD)
#define A64_LDCLRAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, CLR)
#define A64_LDEORAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, EOR)
#define A64_LDSETAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SET)
/* Rt = [Rn] (load acquire); [Rn] = Rs (store release) */
#define A64_SWPAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SWP)
/* Rs = CAS(Rn, Rs, Rt) (load acquire & store release) */
#define A64_CASAL(sf, Rt, Rn, Rs) \
aarch64_insn_gen_cas(Rt, Rn, Rs, A64_SIZE(sf), \
AARCH64_INSN_MEM_ORDER_ACQREL)
/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
@ -196,6 +228,9 @@
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
/* Rd = ~Rm (alias of ORN with A64_ZR as Rn) */
#define A64_MVN(sf, Rd, Rm) \
A64_LOGIC_SREG(sf, Rd, A64_ZR, Rm, ORN)
/* Logical (immediate) */
#define A64_LOGIC_IMM(sf, Rd, Rn, imm, type) ({ \
@ -219,4 +254,7 @@
#define A64_BTI_J A64_HINT(AARCH64_INSN_HINT_BTIJ)
#define A64_BTI_JC A64_HINT(AARCH64_INSN_HINT_BTIJC)
/* DMB */
#define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)
#endif /* _BPF_JIT_H */

View File

@ -27,6 +27,17 @@
#define TCALL_CNT (MAX_BPF_JIT_REG + 2)
#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
#define check_imm(bits, imm) do { \
if ((((imm) > 0) && ((imm) >> (bits))) || \
(((imm) < 0) && (~(imm) >> (bits)))) { \
pr_info("[%2d] imm=%d(0x%x) out of range\n", \
i, imm, imm); \
return -EINVAL; \
} \
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
/* return value from in-kernel function, and exit value from eBPF */
@ -329,6 +340,170 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
#undef jmp_offset
}
#ifdef CONFIG_ARM64_LSE_ATOMICS
static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
const u8 code = insn->code;
const u8 dst = bpf2a64[insn->dst_reg];
const u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const bool isdw = BPF_SIZE(code) == BPF_DW;
const s16 off = insn->off;
u8 reg;
if (!off) {
reg = dst;
} else {
emit_a64_mov_i(1, tmp, off, ctx);
emit(A64_ADD(1, tmp, tmp, dst), ctx);
reg = tmp;
}
switch (insn->imm) {
/* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */
case BPF_ADD:
emit(A64_STADD(isdw, reg, src), ctx);
break;
case BPF_AND:
emit(A64_MVN(isdw, tmp2, src), ctx);
emit(A64_STCLR(isdw, reg, tmp2), ctx);
break;
case BPF_OR:
emit(A64_STSET(isdw, reg, src), ctx);
break;
case BPF_XOR:
emit(A64_STEOR(isdw, reg, src), ctx);
break;
/* src_reg = atomic_fetch_<op>(dst_reg + off, src_reg) */
case BPF_ADD | BPF_FETCH:
emit(A64_LDADDAL(isdw, src, reg, src), ctx);
break;
case BPF_AND | BPF_FETCH:
emit(A64_MVN(isdw, tmp2, src), ctx);
emit(A64_LDCLRAL(isdw, src, reg, tmp2), ctx);
break;
case BPF_OR | BPF_FETCH:
emit(A64_LDSETAL(isdw, src, reg, src), ctx);
break;
case BPF_XOR | BPF_FETCH:
emit(A64_LDEORAL(isdw, src, reg, src), ctx);
break;
/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
case BPF_XCHG:
emit(A64_SWPAL(isdw, src, reg, src), ctx);
break;
/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
case BPF_CMPXCHG:
emit(A64_CASAL(isdw, src, reg, bpf2a64[BPF_REG_0]), ctx);
break;
default:
pr_err_once("unknown atomic op code %02x\n", insn->imm);
return -EINVAL;
}
return 0;
}
#else
static inline int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
return -EINVAL;
}
#endif
static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
const u8 code = insn->code;
const u8 dst = bpf2a64[insn->dst_reg];
const u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const u8 tmp3 = bpf2a64[TMP_REG_3];
const int i = insn - ctx->prog->insnsi;
const s32 imm = insn->imm;
const s16 off = insn->off;
const bool isdw = BPF_SIZE(code) == BPF_DW;
u8 reg;
s32 jmp_offset;
if (!off) {
reg = dst;
} else {
emit_a64_mov_i(1, tmp, off, ctx);
emit(A64_ADD(1, tmp, tmp, dst), ctx);
reg = tmp;
}
if (imm == BPF_ADD || imm == BPF_AND ||
imm == BPF_OR || imm == BPF_XOR) {
/* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */
emit(A64_LDXR(isdw, tmp2, reg), ctx);
if (imm == BPF_ADD)
emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
else if (imm == BPF_AND)
emit(A64_AND(isdw, tmp2, tmp2, src), ctx);
else if (imm == BPF_OR)
emit(A64_ORR(isdw, tmp2, tmp2, src), ctx);
else
emit(A64_EOR(isdw, tmp2, tmp2, src), ctx);
emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
jmp_offset = -3;
check_imm19(jmp_offset);
emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
} else if (imm == (BPF_ADD | BPF_FETCH) ||
imm == (BPF_AND | BPF_FETCH) ||
imm == (BPF_OR | BPF_FETCH) ||
imm == (BPF_XOR | BPF_FETCH)) {
/* src_reg = atomic_fetch_<op>(dst_reg + off, src_reg) */
const u8 ax = bpf2a64[BPF_REG_AX];
emit(A64_MOV(isdw, ax, src), ctx);
emit(A64_LDXR(isdw, src, reg), ctx);
if (imm == (BPF_ADD | BPF_FETCH))
emit(A64_ADD(isdw, tmp2, src, ax), ctx);
else if (imm == (BPF_AND | BPF_FETCH))
emit(A64_AND(isdw, tmp2, src, ax), ctx);
else if (imm == (BPF_OR | BPF_FETCH))
emit(A64_ORR(isdw, tmp2, src, ax), ctx);
else
emit(A64_EOR(isdw, tmp2, src, ax), ctx);
emit(A64_STLXR(isdw, tmp2, reg, tmp3), ctx);
jmp_offset = -3;
check_imm19(jmp_offset);
emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
emit(A64_DMB_ISH, ctx);
} else if (imm == BPF_XCHG) {
/* src_reg = atomic_xchg(dst_reg + off, src_reg); */
emit(A64_MOV(isdw, tmp2, src), ctx);
emit(A64_LDXR(isdw, src, reg), ctx);
emit(A64_STLXR(isdw, tmp2, reg, tmp3), ctx);
jmp_offset = -2;
check_imm19(jmp_offset);
emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
emit(A64_DMB_ISH, ctx);
} else if (imm == BPF_CMPXCHG) {
/* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
const u8 r0 = bpf2a64[BPF_REG_0];
emit(A64_MOV(isdw, tmp2, r0), ctx);
emit(A64_LDXR(isdw, r0, reg), ctx);
emit(A64_EOR(isdw, tmp3, r0, tmp2), ctx);
jmp_offset = 4;
check_imm19(jmp_offset);
emit(A64_CBNZ(isdw, tmp3, jmp_offset), ctx);
emit(A64_STLXR(isdw, src, reg, tmp3), ctx);
jmp_offset = -4;
check_imm19(jmp_offset);
emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
emit(A64_DMB_ISH, ctx);
} else {
pr_err_once("unknown atomic op code %02x\n", imm);
return -EINVAL;
}
return 0;
}
static void build_epilogue(struct jit_ctx *ctx)
{
const u8 r0 = bpf2a64[BPF_REG_0];
@ -434,29 +609,16 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
const u8 src = bpf2a64[insn->src_reg];
const u8 tmp = bpf2a64[TMP_REG_1];
const u8 tmp2 = bpf2a64[TMP_REG_2];
const u8 tmp3 = bpf2a64[TMP_REG_3];
const s16 off = insn->off;
const s32 imm = insn->imm;
const int i = insn - ctx->prog->insnsi;
const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
BPF_CLASS(code) == BPF_JMP;
const bool isdw = BPF_SIZE(code) == BPF_DW;
u8 jmp_cond, reg;
u8 jmp_cond;
s32 jmp_offset;
u32 a64_insn;
int ret;
#define check_imm(bits, imm) do { \
if ((((imm) > 0) && ((imm) >> (bits))) || \
(((imm) < 0) && (~(imm) >> (bits)))) { \
pr_info("[%2d] imm=%d(0x%x) out of range\n", \
i, imm, imm); \
return -EINVAL; \
} \
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
switch (code) {
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
@ -891,33 +1053,12 @@ emit_cond_jmp:
case BPF_STX | BPF_ATOMIC | BPF_W:
case BPF_STX | BPF_ATOMIC | BPF_DW:
if (insn->imm != BPF_ADD) {
pr_err_once("unknown atomic op code %02x\n", insn->imm);
return -EINVAL;
}
/* STX XADD: lock *(u32 *)(dst + off) += src
* and
* STX XADD: lock *(u64 *)(dst + off) += src
*/
if (!off) {
reg = dst;
} else {
emit_a64_mov_i(1, tmp, off, ctx);
emit(A64_ADD(1, tmp, tmp, dst), ctx);
reg = tmp;
}
if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS)) {
emit(A64_STADD(isdw, reg, src), ctx);
} else {
emit(A64_LDXR(isdw, tmp2, reg), ctx);
emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
jmp_offset = -3;
check_imm19(jmp_offset);
emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
}
if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
ret = emit_lse_atomic(insn, ctx);
else
ret = emit_ll_sc_atomic(insn, ctx);
if (ret)
return ret;
break;
default:
@ -1049,15 +1190,18 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto out_off;
}
/* 1. Initial fake pass to compute ctx->idx. */
/* Fake pass to fill in ctx->offset. */
if (build_body(&ctx, extra_pass)) {
/*
* 1. Initial fake pass to compute ctx->idx and ctx->offset.
*
* BPF line info needs ctx->offset[i] to be the offset of
* instruction[i] in jited image, so build prologue first.
*/
if (build_prologue(&ctx, was_classic)) {
prog = orig_prog;
goto out_off;
}
if (build_prologue(&ctx, was_classic)) {
if (build_body(&ctx, extra_pass)) {
prog = orig_prog;
goto out_off;
}
@ -1130,6 +1274,11 @@ skip_init_ctx:
prog->jited_len = prog_size;
if (!prog->is_func || extra_pass) {
int i;
/* offset[prog->len] is the size of program */
for (i = 0; i <= prog->len; i++)
ctx.offset[i] *= AARCH64_INSN_SIZE;
bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
out_off:
kfree(ctx.offset);
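
As a concrete example of the two paths above: for a 64-bit BPF_ADD | BPF_FETCH
with off == 0, emit_lse_atomic() emits a single "ldaddal x(src), x(src), [x(dst)]",
whereas emit_ll_sc_atomic() falls back to an ldxr/stlxr retry loop followed by a
dmb ish barrier (register names shown symbolically, not actual JIT mappings).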

View File

@ -158,7 +158,7 @@ config X86
select HAVE_ALIGNED_STRUCT_PAGE if SLUB
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_HUGE_VMAP if X86_64 || X86_PAE
select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP
select HAVE_ARCH_HUGE_VMALLOC if X86_64
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if X86_64

View File

@ -2330,8 +2330,11 @@ skip_init_addrs:
if (proglen <= 0) {
out_image:
image = NULL;
if (header)
if (header) {
bpf_arch_text_copy(&header->size, &rw_header->size,
sizeof(rw_header->size));
bpf_jit_binary_pack_free(header, rw_header);
}
prog = orig_prog;
goto out_addrs;
}

View File

@ -58,6 +58,10 @@ config BPF_JIT_ALWAYS_ON
Enables BPF JIT and removes BPF interpreter to avoid speculative
execution of BPF instructions by the interpreter.
When CONFIG_BPF_JIT_ALWAYS_ON is enabled, /proc/sys/net/core/bpf_jit_enable
is permanently set to 1 and setting any other value than that will
return failure.
config BPF_JIT_DEFAULT_ON
def_bool ARCH_WANT_DEFAULT_BPF_JIT || BPF_JIT_ALWAYS_ON
depends on HAVE_EBPF_JIT && BPF_JIT

View File

@ -136,7 +136,7 @@ bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
* will be done by the caller.
*
* Although the unlock will be done under
* rcu_read_lock(), it is more intutivie to
* rcu_read_lock(), it is more intuitive to
* read if the freeing of the storage is done
* after the raw_spin_unlock_bh(&local_storage->lock).
*

View File

@ -1,4 +1,4 @@
/* SPDX-License-Identifier: GPL-2.0 */
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook */
#include <uapi/linux/btf.h>
@ -2547,7 +2547,7 @@ static int btf_ptr_resolve(struct btf_verifier_env *env,
*
* We now need to continue from the last-resolved-ptr to
* ensure the last-resolved-ptr will not referring back to
* the currenct ptr (t).
* the current ptr (t).
*/
if (btf_type_is_modifier(next_type)) {
const struct btf_type *resolved_type;
@ -6148,7 +6148,7 @@ int btf_type_snprintf_show(const struct btf *btf, u32 type_id, void *obj,
btf_type_show(btf, type_id, obj, (struct btf_show *)&ssnprintf);
/* If we encontered an error, return it. */
/* If we encountered an error, return it. */
if (ssnprintf.show.state.status)
return ssnprintf.show.state.status;
@ -6398,7 +6398,8 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op,
pr_warn("failed to validate module [%s] BTF: %ld\n",
mod->name, PTR_ERR(btf));
kfree(btf_mod);
err = PTR_ERR(btf);
if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH))
err = PTR_ERR(btf);
goto out;
}
err = btf_alloc_id(btf);
@ -6706,7 +6707,7 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook,
const struct btf_kfunc_id_set *kset)
{
bool vmlinux_set = !btf_is_module(btf);
int type, ret;
int type, ret = 0;
for (type = 0; type < ARRAY_SIZE(kset->sets); type++) {
if (!kset->sets[type])

View File

@ -1031,7 +1031,7 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr,
* __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
* @sk: The socket sending or receiving traffic
* @skb: The skb that is being sent or received
* @type: The type of program to be exectuted
* @type: The type of program to be executed
*
* If no socket is passed, or the socket is not of type INET or INET6,
* this function does nothing and returns 0.
@ -1094,7 +1094,7 @@ EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
/**
* __cgroup_bpf_run_filter_sk() - Run a program on a sock
* @sk: sock structure to manipulate
* @type: The type of program to be exectuted
* @type: The type of program to be executed
*
* socket is passed is expected to be of type INET or INET6.
*
@ -1119,7 +1119,7 @@ EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
* provided by user sockaddr
* @sk: sock struct that will use sockaddr
* @uaddr: sockaddr struct provided by user
* @type: The type of program to be exectuted
* @type: The type of program to be executed
* @t_ctx: Pointer to attach type specific context
* @flags: Pointer to u32 which contains higher bits of BPF program
* return value (OR'ed together).
@ -1166,7 +1166,7 @@ EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);
* @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
* sk with connection information (IP addresses, etc.) May not contain
* cgroup info if it is a req sock.
* @type: The type of program to be exectuted
* @type: The type of program to be executed
*
* socket passed is expected to be of type INET or INET6.
*

View File

@ -1112,13 +1112,16 @@ int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
* 1) when the program is freed after;
* 2) when the JIT engine fails (before bpf_jit_binary_pack_finalize).
* For case 2), we need to free both the RO memory and the RW buffer.
* Also, ro_header->size in 2) is not properly set yet, so rw_header->size
* is used for uncharge.
*
* bpf_jit_binary_pack_free requires proper ro_header->size. However,
* bpf_jit_binary_pack_alloc does not set it. Therefore, ro_header->size
* must be set with either bpf_jit_binary_pack_finalize (normal path) or
* bpf_arch_text_copy (when jit fails).
*/
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
struct bpf_binary_header *rw_header)
{
u32 size = rw_header ? rw_header->size : ro_header->size;
u32 size = ro_header->size;
bpf_prog_pack_free(ro_header);
kvfree(rw_header);

View File

@ -1636,7 +1636,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
value_size = size * num_possible_cpus();
total = 0;
/* while experimenting with hash tables with sizes ranging from 10 to
* 1000, it was observed that a bucket can have upto 5 entries.
* 1000, it was observed that a bucket can have up to 5 entries.
*/
bucket_size = 5;

View File

@ -1093,7 +1093,7 @@ struct bpf_hrtimer {
struct bpf_timer_kern {
struct bpf_hrtimer *timer;
/* bpf_spin_lock is used here instead of spinlock_t to make
* sure that it always fits into space resereved by struct bpf_timer
* sure that it always fits into space reserved by struct bpf_timer
* regardless of LOCKDEP and spinlock debug flags.
*/
struct bpf_spin_lock lock;

View File

@ -1,4 +1,4 @@
//SPDX-License-Identifier: GPL-2.0
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf-cgroup.h>
#include <linux/bpf.h>
#include <linux/bpf_local_storage.h>

View File

@ -54,6 +54,13 @@ static int load_skel(void)
err = PTR_ERR(progs_link);
goto out;
}
/* Avoid taking over stdin/stdout/stderr of init process. Zeroing out
* makes skel_closenz() a no-op later in iterators_bpf__destroy().
*/
close_fd(skel->links.dump_bpf_map_fd);
skel->links.dump_bpf_map_fd = 0;
close_fd(skel->links.dump_bpf_prog_fd);
skel->links.dump_bpf_prog_fd = 0;
return 0;
out:
free_links_and_skel();

View File

@ -143,7 +143,7 @@ static void reuseport_array_free(struct bpf_map *map)
/*
* Once reaching here, all sk->sk_user_data is not
* referenceing this "array". "array" can be freed now.
* referencing this "array". "array" can be freed now.
*/
bpf_map_area_free(array);
}

View File

@ -132,7 +132,8 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
int i;
struct mmap_unlock_irq_work *work = NULL;
bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
struct vm_area_struct *vma;
struct vm_area_struct *vma, *prev_vma = NULL;
const char *prev_build_id;
/* If the irq_work is in use, fall back to report ips. Same
* fallback is used for kernel stack (!user) on a stackmap with
@ -150,6 +151,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
}
for (i = 0; i < trace_nr; i++) {
if (range_in_vma(prev_vma, ips[i], ips[i])) {
vma = prev_vma;
memcpy(id_offs[i].build_id, prev_build_id,
BUILD_ID_SIZE_MAX);
goto build_id_valid;
}
vma = find_vma(current->mm, ips[i]);
if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) {
/* per entry fall back to ips */
@ -158,9 +165,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
continue;
}
build_id_valid:
id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
- vma->vm_start;
id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
prev_vma = vma;
prev_build_id = id_offs[i].build_id;
}
bpf_mmap_unlock_mm(work, current->mm);
}

View File

@ -1352,7 +1352,6 @@ int generic_map_delete_batch(struct bpf_map *map,
err = map->ops->map_delete_elem(map, key);
rcu_read_unlock();
bpf_enable_instrumentation();
maybe_wait_bpf_programs(map);
if (err)
break;
cond_resched();
@ -1361,6 +1360,8 @@ int generic_map_delete_batch(struct bpf_map *map,
err = -EFAULT;
kvfree(key);
maybe_wait_bpf_programs(map);
return err;
}
@ -2565,7 +2566,7 @@ static int bpf_link_alloc_id(struct bpf_link *link)
* pre-allocated resources are to be freed with bpf_cleanup() call. All the
* transient state is passed around in struct bpf_link_primer.
* This is preferred way to create and initialize bpf_link, especially when
* there are complicated and expensive operations inbetween creating bpf_link
* there are complicated and expensive operations in between creating bpf_link
* itself and attaching it to BPF hook. By using bpf_link_prime() and
* bpf_link_settle() kernel code using bpf_link doesn't have to perform
* expensive (and potentially failing) roll back operations in a rare case

View File

@ -45,7 +45,7 @@ void *bpf_jit_alloc_exec_page(void)
set_vm_flush_reset_perms(image);
/* Keep image as writeable. The alternative is to keep flipping ro/rw
* everytime new program is attached or detached.
* every time new program is attached or detached.
*/
set_memory_x((long)image, 1);
return image;

View File

@ -539,7 +539,7 @@ static const char *reg_type_str(struct bpf_verifier_env *env,
char postfix[16] = {0}, prefix[32] = {0};
static const char * const str[] = {
[NOT_INIT] = "?",
[SCALAR_VALUE] = "inv",
[SCALAR_VALUE] = "scalar",
[PTR_TO_CTX] = "ctx",
[CONST_PTR_TO_MAP] = "map_ptr",
[PTR_TO_MAP_VALUE] = "map_value",
@ -685,74 +685,80 @@ static void print_verifier_state(struct bpf_verifier_env *env,
continue;
verbose(env, " R%d", i);
print_liveness(env, reg->live);
verbose(env, "=%s", reg_type_str(env, t));
verbose(env, "=");
if (t == SCALAR_VALUE && reg->precise)
verbose(env, "P");
if ((t == SCALAR_VALUE || t == PTR_TO_STACK) &&
tnum_is_const(reg->var_off)) {
/* reg->off should be 0 for SCALAR_VALUE */
verbose(env, "%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
verbose(env, "%lld", reg->var_off.value + reg->off);
} else {
const char *sep = "";
verbose(env, "%s", reg_type_str(env, t));
if (base_type(t) == PTR_TO_BTF_ID ||
base_type(t) == PTR_TO_PERCPU_BTF_ID)
verbose(env, "%s", kernel_type_name(reg->btf, reg->btf_id));
verbose(env, "(id=%d", reg->id);
if (reg_type_may_be_refcounted_or_null(t))
verbose(env, ",ref_obj_id=%d", reg->ref_obj_id);
verbose(env, "(");
/*
* _a stands for append, was shortened to avoid multiline statements below.
* This macro is used to output a comma separated list of attributes.
*/
#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, __VA_ARGS__); sep = ","; })
if (reg->id)
verbose_a("id=%d", reg->id);
if (reg_type_may_be_refcounted_or_null(t) && reg->ref_obj_id)
verbose_a("ref_obj_id=%d", reg->ref_obj_id);
if (t != SCALAR_VALUE)
verbose(env, ",off=%d", reg->off);
verbose_a("off=%d", reg->off);
if (type_is_pkt_pointer(t))
verbose(env, ",r=%d", reg->range);
verbose_a("r=%d", reg->range);
else if (base_type(t) == CONST_PTR_TO_MAP ||
base_type(t) == PTR_TO_MAP_KEY ||
base_type(t) == PTR_TO_MAP_VALUE)
verbose(env, ",ks=%d,vs=%d",
reg->map_ptr->key_size,
reg->map_ptr->value_size);
verbose_a("ks=%d,vs=%d",
reg->map_ptr->key_size,
reg->map_ptr->value_size);
if (tnum_is_const(reg->var_off)) {
/* Typically an immediate SCALAR_VALUE, but
* could be a pointer whose offset is too big
* for reg->off
*/
verbose(env, ",imm=%llx", reg->var_off.value);
verbose_a("imm=%llx", reg->var_off.value);
} else {
if (reg->smin_value != reg->umin_value &&
reg->smin_value != S64_MIN)
verbose(env, ",smin_value=%lld",
(long long)reg->smin_value);
verbose_a("smin=%lld", (long long)reg->smin_value);
if (reg->smax_value != reg->umax_value &&
reg->smax_value != S64_MAX)
verbose(env, ",smax_value=%lld",
(long long)reg->smax_value);
verbose_a("smax=%lld", (long long)reg->smax_value);
if (reg->umin_value != 0)
verbose(env, ",umin_value=%llu",
(unsigned long long)reg->umin_value);
verbose_a("umin=%llu", (unsigned long long)reg->umin_value);
if (reg->umax_value != U64_MAX)
verbose(env, ",umax_value=%llu",
(unsigned long long)reg->umax_value);
verbose_a("umax=%llu", (unsigned long long)reg->umax_value);
if (!tnum_is_unknown(reg->var_off)) {
char tn_buf[48];
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
verbose(env, ",var_off=%s", tn_buf);
verbose_a("var_off=%s", tn_buf);
}
if (reg->s32_min_value != reg->smin_value &&
reg->s32_min_value != S32_MIN)
verbose(env, ",s32_min_value=%d",
(int)(reg->s32_min_value));
verbose_a("s32_min=%d", (int)(reg->s32_min_value));
if (reg->s32_max_value != reg->smax_value &&
reg->s32_max_value != S32_MAX)
verbose(env, ",s32_max_value=%d",
(int)(reg->s32_max_value));
verbose_a("s32_max=%d", (int)(reg->s32_max_value));
if (reg->u32_min_value != reg->umin_value &&
reg->u32_min_value != U32_MIN)
verbose(env, ",u32_min_value=%d",
(int)(reg->u32_min_value));
verbose_a("u32_min=%d", (int)(reg->u32_min_value));
if (reg->u32_max_value != reg->umax_value &&
reg->u32_max_value != U32_MAX)
verbose(env, ",u32_max_value=%d",
(int)(reg->u32_max_value));
verbose_a("u32_max=%d", (int)(reg->u32_max_value));
}
#undef verbose_a
verbose(env, ")");
}
}
@ -777,7 +783,7 @@ static void print_verifier_state(struct bpf_verifier_env *env,
if (is_spilled_reg(&state->stack[i])) {
reg = &state->stack[i].spilled_ptr;
t = reg->type;
verbose(env, "=%s", reg_type_str(env, t));
verbose(env, "=%s", t == SCALAR_VALUE ? "" : reg_type_str(env, t));
if (t == SCALAR_VALUE && reg->precise)
verbose(env, "P");
if (t == SCALAR_VALUE && tnum_is_const(reg->var_off))
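
The practical effect on the register state dump (examples taken from the updated
test_align expectations later in this series):

  before: R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))
  after:  R3_w=scalar(umax=255,var_off=(0x0; 0xff))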

View File

@ -339,6 +339,16 @@ config DEBUG_INFO_BTF_MODULES
help
Generate compact split BTF type information for kernel modules.
config MODULE_ALLOW_BTF_MISMATCH
bool "Allow loading modules with non-matching BTF type info"
depends on DEBUG_INFO_BTF_MODULES
help
For modules whose split BTF does not match vmlinux, load without
BTF rather than refusing to load. The default behavior with
module BTF enabled is to reject modules with such mismatches;
this option will still load module BTF where possible but ignore
it when a mismatch is found.
config GDB_SCRIPTS
bool "Provide GDB scripts for kernel debugging"
help

View File

@ -150,6 +150,11 @@ static int bpf_test_finish(const union bpf_attr *kattr,
if (data_out) {
int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;
if (len < 0) {
err = -ENOSPC;
goto out;
}
if (copy_to_user(data_out, data, len))
goto out;

View File

@ -16,5 +16,8 @@ fi
if [ "${pahole_ver}" -ge "121" ]; then
extra_paholeopt="${extra_paholeopt} --btf_gen_floats"
fi
if [ "${pahole_ver}" -ge "122" ]; then
extra_paholeopt="${extra_paholeopt} -j"
fi
echo ${extra_paholeopt}

View File

@ -18,19 +18,19 @@ BPF_DIR = $(srctree)/tools/lib/bpf
ifneq ($(OUTPUT),)
_OUTPUT := $(OUTPUT)
else
_OUTPUT := $(CURDIR)
_OUTPUT := $(CURDIR)/
endif
BOOTSTRAP_OUTPUT := $(_OUTPUT)/bootstrap/
BOOTSTRAP_OUTPUT := $(_OUTPUT)bootstrap/
LIBBPF_OUTPUT := $(_OUTPUT)/libbpf/
LIBBPF_OUTPUT := $(_OUTPUT)libbpf/
LIBBPF_DESTDIR := $(LIBBPF_OUTPUT)
LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)/include
LIBBPF_INCLUDE := $(LIBBPF_DESTDIR)include
LIBBPF_HDRS_DIR := $(LIBBPF_INCLUDE)/bpf
LIBBPF := $(LIBBPF_OUTPUT)libbpf.a
LIBBPF_BOOTSTRAP_OUTPUT := $(BOOTSTRAP_OUTPUT)libbpf/
LIBBPF_BOOTSTRAP_DESTDIR := $(LIBBPF_BOOTSTRAP_OUTPUT)
LIBBPF_BOOTSTRAP_INCLUDE := $(LIBBPF_BOOTSTRAP_DESTDIR)/include
LIBBPF_BOOTSTRAP_INCLUDE := $(LIBBPF_BOOTSTRAP_DESTDIR)include
LIBBPF_BOOTSTRAP_HDRS_DIR := $(LIBBPF_BOOTSTRAP_INCLUDE)/bpf
LIBBPF_BOOTSTRAP := $(LIBBPF_BOOTSTRAP_OUTPUT)libbpf.a
@ -44,7 +44,7 @@ $(LIBBPF_OUTPUT) $(BOOTSTRAP_OUTPUT) $(LIBBPF_BOOTSTRAP_OUTPUT) $(LIBBPF_HDRS_DI
$(LIBBPF): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_OUTPUT)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) \
DESTDIR=$(LIBBPF_DESTDIR) prefix= $(LIBBPF) install_headers
DESTDIR=$(LIBBPF_DESTDIR:/=) prefix= $(LIBBPF) install_headers
$(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_DIR)
$(call QUIET_INSTALL, $@)
@ -52,7 +52,7 @@ $(LIBBPF_INTERNAL_HDRS): $(LIBBPF_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_HDRS_
$(LIBBPF_BOOTSTRAP): $(wildcard $(BPF_DIR)/*.[ch] $(BPF_DIR)/Makefile) | $(LIBBPF_BOOTSTRAP_OUTPUT)
$(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_BOOTSTRAP_OUTPUT) \
DESTDIR=$(LIBBPF_BOOTSTRAP_DESTDIR) prefix= \
DESTDIR=$(LIBBPF_BOOTSTRAP_DESTDIR:/=) prefix= \
ARCH= CROSS_COMPILE= CC=$(HOSTCC) LD=$(HOSTLD) $@ install_headers
$(LIBBPF_BOOTSTRAP_INTERNAL_HDRS): $(LIBBPF_BOOTSTRAP_HDRS_DIR)/%.h: $(BPF_DIR)/%.h | $(LIBBPF_BOOTSTRAP_HDRS_DIR)
@ -93,7 +93,7 @@ INSTALL ?= install
RM ?= rm -f
FEATURE_USER = .bpftool
FEATURE_TESTS = libbfd disassembler-four-args reallocarray zlib libcap \
FEATURE_TESTS = libbfd disassembler-four-args zlib libcap \
clang-bpf-co-re
FEATURE_DISPLAY = libbfd disassembler-four-args zlib libcap \
clang-bpf-co-re
@ -118,10 +118,6 @@ ifeq ($(feature-disassembler-four-args), 1)
CFLAGS += -DDISASM_FOUR_ARGS_SIGNATURE
endif
ifeq ($(feature-reallocarray), 0)
CFLAGS += -DCOMPAT_NEED_REALLOCARRAY
endif
LIBS = $(LIBBPF) -lelf -lz
LIBS_BOOTSTRAP = $(LIBBPF_BOOTSTRAP) -lelf -lz
ifeq ($(feature-libcap), 1)

View File

@ -209,15 +209,36 @@ static int codegen_datasec_def(struct bpf_object *obj,
return 0;
}
static const struct btf_type *find_type_for_map(struct btf *btf, const char *map_ident)
{
int n = btf__type_cnt(btf), i;
char sec_ident[256];
for (i = 1; i < n; i++) {
const struct btf_type *t = btf__type_by_id(btf, i);
const char *name;
if (!btf_is_datasec(t))
continue;
name = btf__str_by_offset(btf, t->name_off);
if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
continue;
if (strcmp(sec_ident, map_ident) == 0)
return t;
}
return NULL;
}
static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
{
struct btf *btf = bpf_object__btf(obj);
int n = btf__type_cnt(btf);
struct btf_dump *d;
struct bpf_map *map;
const struct btf_type *sec;
char sec_ident[256], map_ident[256];
int i, err = 0;
char map_ident[256];
int err = 0;
d = btf_dump__new(btf, codegen_btf_dump_printf, NULL, NULL);
err = libbpf_get_error(d);
@ -234,23 +255,7 @@ static int codegen_datasecs(struct bpf_object *obj, const char *obj_name)
if (!get_map_ident(map, map_ident, sizeof(map_ident)))
continue;
sec = NULL;
for (i = 1; i < n; i++) {
const struct btf_type *t = btf__type_by_id(btf, i);
const char *name;
if (!btf_is_datasec(t))
continue;
name = btf__str_by_offset(btf, t->name_off);
if (!get_datasec_ident(name, sec_ident, sizeof(sec_ident)))
continue;
if (strcmp(sec_ident, map_ident) == 0) {
sec = t;
break;
}
}
sec = find_type_for_map(btf, map_ident);
/* In some cases (e.g., sections like .rodata.cst16 containing
* compiler allocated string constants only) there will be
@ -363,6 +368,73 @@ static size_t bpf_map_mmap_sz(const struct bpf_map *map)
return map_sz;
}
/* Emit type size asserts for all top-level fields in memory-mapped internal maps. */
static void codegen_asserts(struct bpf_object *obj, const char *obj_name)
{
struct btf *btf = bpf_object__btf(obj);
struct bpf_map *map;
struct btf_var_secinfo *sec_var;
int i, vlen;
const struct btf_type *sec;
char map_ident[256], var_ident[256];
codegen("\
\n\
__attribute__((unused)) static void \n\
%1$s__assert(struct %1$s *s) \n\
{ \n\
#ifdef __cplusplus \n\
#define _Static_assert static_assert \n\
#endif \n\
", obj_name);
bpf_object__for_each_map(map, obj) {
if (!bpf_map__is_internal(map))
continue;
if (!(bpf_map__map_flags(map) & BPF_F_MMAPABLE))
continue;
if (!get_map_ident(map, map_ident, sizeof(map_ident)))
continue;
sec = find_type_for_map(btf, map_ident);
if (!sec) {
/* best effort, couldn't find the type for this map */
continue;
}
sec_var = btf_var_secinfos(sec);
vlen = btf_vlen(sec);
for (i = 0; i < vlen; i++, sec_var++) {
const struct btf_type *var = btf__type_by_id(btf, sec_var->type);
const char *var_name = btf__name_by_offset(btf, var->name_off);
long var_size;
/* static variables are not exposed through BPF skeleton */
if (btf_var(var)->linkage == BTF_VAR_STATIC)
continue;
var_size = btf__resolve_size(btf, var->type);
if (var_size < 0)
continue;
var_ident[0] = '\0';
strncat(var_ident, var_name, sizeof(var_ident) - 1);
sanitize_identifier(var_ident);
printf("\t_Static_assert(sizeof(s->%s->%s) == %ld, \"unexpected size of '%s'\");\n",
map_ident, var_ident, var_size, var_ident);
}
}
codegen("\
\n\
#ifdef __cplusplus \n\
#undef _Static_assert \n\
#endif \n\
} \n\
");
}
static void codegen_attach_detach(struct bpf_object *obj, const char *obj_name)
{
struct bpf_program *prog;
@ -639,8 +711,11 @@ static int gen_trace(struct bpf_object *obj, const char *obj_name, const char *h
} \n\
return skel; \n\
} \n\
\n\
", obj_name);
codegen_asserts(obj, obj_name);
codegen("\
\n\
\n\
@ -1046,9 +1121,17 @@ static int do_skeleton(int argc, char **argv)
const void *%1$s::elf_bytes(size_t *sz) { return %1$s__elf_bytes(sz); } \n\
#endif /* __cplusplus */ \n\
\n\
#endif /* %2$s */ \n\
",
obj_name, header_guard);
obj_name);
codegen_asserts(obj, obj_name);
codegen("\
\n\
\n\
#endif /* %1$s */ \n\
",
header_guard);
err = 0;
out:
bpf_object__close(obj);
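
For reference, the assert block that codegen_asserts() emits into a generated
skeleton header looks roughly as follows (object and variable names are made up
for illustration; shape derived from the format strings above):

  __attribute__((unused)) static void
  example__assert(struct example *s)
  {
  #ifdef __cplusplus
  #define _Static_assert static_assert
  #endif
          _Static_assert(sizeof(s->bss->my_counter) == 8, "unexpected size of 'my_counter'");
  #ifdef __cplusplus
  #undef _Static_assert
  #endif
  }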

View File

@ -8,10 +8,10 @@
#undef GCC_VERSION
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/bpf.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <tools/libc_compat.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h>

View File

@ -26,6 +26,7 @@
#include <bpf/btf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>
#include <bpf/skel_internal.h>
#include "cfg.h"
@ -1558,9 +1559,9 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
if (fd < 0)
goto err_free_reuse_maps;
new_map_replace = reallocarray(map_replace,
old_map_fds + 1,
sizeof(*map_replace));
new_map_replace = libbpf_reallocarray(map_replace,
old_map_fds + 1,
sizeof(*map_replace));
if (!new_map_replace) {
p_err("mem alloc failed");
goto err_free_reuse_maps;

View File

@ -8,6 +8,7 @@
#include <string.h>
#include <sys/types.h>
#include <bpf/libbpf.h>
#include <bpf/libbpf_internal.h>
#include "disasm.h"
#include "json_writer.h"
@ -32,8 +33,8 @@ void kernel_syms_load(struct dump_data *dd)
return;
while (fgets(buff, sizeof(buff), fp)) {
tmp = reallocarray(dd->sym_mapping, dd->sym_count + 1,
sizeof(*dd->sym_mapping));
tmp = libbpf_reallocarray(dd->sym_mapping, dd->sym_count + 1,
sizeof(*dd->sym_mapping));
if (!tmp) {
out:
free(dd->sym_mapping);

View File

@ -1505,6 +1505,11 @@ static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id,
if (s->name_resolved)
return *cached_name ? *cached_name : orig_name;
if (btf_is_fwd(t) || (btf_is_enum(t) && btf_vlen(t) == 0)) {
s->name_resolved = 1;
return orig_name;
}
dup_cnt = btf_dump_name_dups(d, name_map, orig_name);
if (dup_cnt > 1) {
const size_t max_len = 256;
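
A minimal illustration of the case handled above (type name is hypothetical):
BTF carrying both a forward declaration and the definition of the same struct.
Previously the FWD entry was counted as a duplicate, so the definition was dumped
under a mangled name such as foo___2; with FWDs (and empty enums) skipped, the
definition keeps its original name:

  struct foo;               /* BTF_KIND_FWD                          */

  struct foo {              /* definition is now dumped as "foo",    */
          int a;            /* not renamed to "foo___2"              */
  };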

View File

@ -1374,22 +1374,20 @@ static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
int ret = -ENOENT;
Elf_Data *data;
Elf_Scn *scn;
*size = 0;
if (!name)
return -EINVAL;
scn = elf_sec_by_name(obj, name);
data = elf_sec_data(obj, scn);
if (data) {
ret = 0; /* found it */
*size = data->d_size;
return 0; /* found it */
}
return *size ? 0 : ret;
return -ENOENT;
}
static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off)
@ -2795,7 +2793,7 @@ static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
goto sort_vars;
ret = find_elf_sec_sz(obj, name, &size);
if (ret || !size || (t->size && t->size != size)) {
if (ret || !size) {
pr_debug("Invalid size for section %s: %u bytes\n", name, size);
return -ENOENT;
}
@ -4861,7 +4859,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
LIBBPF_OPTS(bpf_map_create_opts, create_attr);
struct bpf_map_def *def = &map->def;
const char *map_name = NULL;
__u32 max_entries;
int err = 0;
if (kernel_supports(obj, FEAT_PROG_NAME))
@ -4871,21 +4868,6 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
create_attr.numa_node = map->numa_node;
create_attr.map_extra = map->map_extra;
if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
int nr_cpus;
nr_cpus = libbpf_num_possible_cpus();
if (nr_cpus < 0) {
pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
map->name, nr_cpus);
return nr_cpus;
}
pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
max_entries = nr_cpus;
} else {
max_entries = def->max_entries;
}
if (bpf_map__is_struct_ops(map))
create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
@ -4935,7 +4917,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
if (obj->gen_loader) {
bpf_gen__map_create(obj->gen_loader, def->type, map_name,
def->key_size, def->value_size, max_entries,
def->key_size, def->value_size, def->max_entries,
&create_attr, is_inner ? -1 : map - obj->maps);
/* Pretend to have valid FD to pass various fd >= 0 checks.
* This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
@ -4944,7 +4926,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
} else {
map->fd = bpf_map_create(def->type, map_name,
def->key_size, def->value_size,
max_entries, &create_attr);
def->max_entries, &create_attr);
}
if (map->fd < 0 && (create_attr.btf_key_type_id ||
create_attr.btf_value_type_id)) {
@ -4961,7 +4943,7 @@ static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, b
map->btf_value_type_id = 0;
map->fd = bpf_map_create(def->type, map_name,
def->key_size, def->value_size,
max_entries, &create_attr);
def->max_entries, &create_attr);
}
err = map->fd < 0 ? -errno : 0;
@ -5065,6 +5047,24 @@ static int bpf_object_init_prog_arrays(struct bpf_object *obj)
return 0;
}
static int map_set_def_max_entries(struct bpf_map *map)
{
if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
int nr_cpus;
nr_cpus = libbpf_num_possible_cpus();
if (nr_cpus < 0) {
pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
map->name, nr_cpus);
return nr_cpus;
}
pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
map->def.max_entries = nr_cpus;
}
return 0;
}
static int
bpf_object__create_maps(struct bpf_object *obj)
{
@ -5097,6 +5097,10 @@ bpf_object__create_maps(struct bpf_object *obj)
continue;
}
err = map_set_def_max_entries(map);
if (err)
goto err_out;
retried = false;
retry:
if (map->pin_path) {
@ -10947,7 +10951,7 @@ struct perf_buffer *perf_buffer__new_raw_v0_6_0(int map_fd, size_t page_cnt,
{
struct perf_buffer_params p = {};
if (page_cnt == 0 || !attr)
if (!attr)
return libbpf_err_ptr(-EINVAL);
if (!OPTS_VALID(opts, perf_buffer_raw_opts))
@ -10988,7 +10992,7 @@ static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
__u32 map_info_len;
int err, i, j, n;
if (page_cnt & (page_cnt - 1)) {
if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
pr_warn("page count should be power of two, but is %zu\n",
page_cnt);
return ERR_PTR(-EINVAL);
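
A sketch of the setup addressed by the map_set_def_max_entries() change above
(map name and pinning flag are illustrative): a BPF_MAP_TYPE_PERF_EVENT_ARRAY
declared with max_entries left at 0, which libbpf sizes to the number of possible
CPUs. That auto-determined size is now written into map->def.max_entries before
map creation, so reusing the pinned map compares against the real size instead of 0:

  struct {
          __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
          __uint(key_size, sizeof(int));
          __uint(value_size, sizeof(int));
          __uint(pinning, LIBBPF_PIN_BY_NAME);
  } events SEC(".maps");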

View File

@ -1,4 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
bpftool
bpf-helpers*
bpf-syscall*
test_verifier

View File

@ -470,6 +470,7 @@ $(OUTPUT)/$(TRUNNER_BINARY): $(TRUNNER_TEST_OBJS) \
$$(call msg,BINARY,,$$@)
$(Q)$$(CC) $$(CFLAGS) $$(filter %.a %.o,$$^) $$(LDLIBS) -o $$@
$(Q)$(RESOLVE_BTFIDS) --btf $(TRUNNER_OUTPUT)/btf_data.o $$@
$(Q)ln -sf $(if $2,..,.)/tools/build/bpftool/bootstrap/bpftool $(if $2,$2/)bpftool
endef
@ -555,7 +556,7 @@ $(OUTPUT)/bench: $(OUTPUT)/bench.o \
EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(SCRATCH_DIR) $(HOST_SCRATCH_DIR) \
prog_tests/tests.h map_tests/tests.h verifier/tests.h \
feature \
feature bpftool \
$(addprefix $(OUTPUT)/,*.o *.skel.h *.lskel.h no_alu32 bpf_gcc bpf_testmod.ko)
.PHONY: docs docs-clean

View File

@ -39,13 +39,13 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1=ctx(id=0,off=0,imm=0)"},
{0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
{0, "R3_w=inv2"},
{1, "R3_w=inv4"},
{2, "R3_w=inv8"},
{3, "R3_w=inv16"},
{4, "R3_w=inv32"},
{0, "R3_w=2"},
{1, "R3_w=4"},
{2, "R3_w=8"},
{3, "R3_w=16"},
{4, "R3_w=32"},
},
},
{
@ -67,19 +67,19 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1=ctx(id=0,off=0,imm=0)"},
{0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
{0, "R3_w=inv1"},
{1, "R3_w=inv2"},
{2, "R3_w=inv4"},
{3, "R3_w=inv8"},
{4, "R3_w=inv16"},
{5, "R3_w=inv1"},
{6, "R4_w=inv32"},
{7, "R4_w=inv16"},
{8, "R4_w=inv8"},
{9, "R4_w=inv4"},
{10, "R4_w=inv2"},
{0, "R3_w=1"},
{1, "R3_w=2"},
{2, "R3_w=4"},
{3, "R3_w=8"},
{4, "R3_w=16"},
{5, "R3_w=1"},
{6, "R4_w=32"},
{7, "R4_w=16"},
{8, "R4_w=8"},
{9, "R4_w=4"},
{10, "R4_w=2"},
},
},
{
@ -96,14 +96,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1=ctx(id=0,off=0,imm=0)"},
{0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
{0, "R3_w=inv4"},
{1, "R3_w=inv8"},
{2, "R3_w=inv10"},
{3, "R4_w=inv8"},
{4, "R4_w=inv12"},
{5, "R4_w=inv14"},
{0, "R3_w=4"},
{1, "R3_w=8"},
{2, "R3_w=10"},
{3, "R4_w=8"},
{4, "R4_w=12"},
{5, "R4_w=14"},
},
},
{
@ -118,12 +118,12 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{0, "R1=ctx(id=0,off=0,imm=0)"},
{0, "R1=ctx(off=0,imm=0)"},
{0, "R10=fp0"},
{0, "R3_w=inv7"},
{1, "R3_w=inv7"},
{2, "R3_w=inv14"},
{3, "R3_w=inv56"},
{0, "R3_w=7"},
{1, "R3_w=7"},
{2, "R3_w=14"},
{3, "R3_w=56"},
},
},
@ -161,19 +161,19 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{6, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
{6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
{7, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
{8, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{9, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
{10, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
{12, "R3_w=pkt_end(id=0,off=0,imm=0)"},
{17, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
{18, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
{19, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
{20, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
{21, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{22, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
{6, "R0_w=pkt(off=8,r=8,imm=0)"},
{6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{7, "R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
{8, "R3_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{9, "R3_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
{10, "R3_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
{12, "R3_w=pkt_end(off=0,imm=0)"},
{17, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{18, "R4_w=scalar(umax=8160,var_off=(0x0; 0x1fe0))"},
{19, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
{20, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
{21, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{22, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
},
},
{
@ -194,16 +194,16 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{6, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
{7, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
{8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
{9, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
{10, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
{11, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
{12, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{13, "R4_w=inv(id=1,umax_value=255,var_off=(0x0; 0xff))"},
{14, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
{15, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
{6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{7, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
{8, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{9, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
{10, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
{11, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
{12, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
{13, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
{14, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
{15, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
},
},
{
@ -234,14 +234,14 @@ static struct bpf_align_test tests[] = {
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.matches = {
{2, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
{4, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
{5, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
{9, "R2=pkt(id=0,off=0,r=18,imm=0)"},
{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
{13, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
{2, "R5_w=pkt(off=0,r=0,imm=0)"},
{4, "R5_w=pkt(off=14,r=0,imm=0)"},
{5, "R4_w=pkt(off=14,r=0,imm=0)"},
{9, "R2=pkt(off=0,r=18,imm=0)"},
{10, "R5=pkt(off=14,r=18,imm=0)"},
{10, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
{13, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
{14, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
},
},
{
@ -296,59 +296,59 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
{7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{6, "R2_w=pkt(off=0,r=8,imm=0)"},
{7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Offset is added to packet pointer R5, resulting in
* known fixed offset, and variable offset from R6.
*/
{11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{11, "R5_w=pkt(id=1,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* At the time the word size load is performed from R5,
* its total offset is NET_IP_ALIGN + reg->off (0) +
* reg->aux_off (14) which is 16. Then the variable
* offset is considered using reg->aux_off_align which
* is 4 and meets the load's requirements.
*/
{15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
{15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
{15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
{15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
/* Variable offset is added to R5 packet pointer,
* resulting in auxiliary alignment of 4.
*/
{17, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{17, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* Constant offset is added to R5, resulting in
* reg->off of 14.
*/
{18, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{18, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off
* (14) which is 16. Then the variable offset is 4-byte
* aligned, so the total offset is 4-byte aligned and
* meets the load's requirements.
*/
{23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
{23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
{23, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
{23, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
/* Constant offset is added to R5 packet pointer,
* resulting in reg->off value of 14.
*/
{25, "R5_w=pkt(id=0,off=14,r=8"},
{25, "R5_w=pkt(off=14,r=8"},
/* Variable offset is added to R5, resulting in a
* variable offset of (4n).
*/
{26, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{26, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* Constant is added to R5 again, setting reg->off to 18. */
{27, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{27, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
/* And once more we add a variable; resulting var_off
* is still (4n), fixed offset is not changed.
* Also, we create a new reg->id.
*/
{28, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
{28, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (18)
* which is 20. Then the variable offset is (4n), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
{33, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
{33, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
},
},
{
@ -386,36 +386,36 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
{7, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{6, "R2_w=pkt(off=0,r=8,imm=0)"},
{7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Adding 14 makes R6 be (4n+2) */
{8, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
{8, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
/* Packet pointer has (4n+2) offset */
{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
{12, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
{11, "R5_w=pkt(id=1,off=0,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
{12, "R4=pkt(id=1,off=4,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc)"},
{15, "R5=pkt(id=1,off=0,r=4,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
/* Newly read value in R6 was shifted left by 2, so has
* known alignment of 4.
*/
{17, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{17, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Added (4n) to packet pointer's (4n+2) var_off, giving
* another (4n+2).
*/
{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
{20, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
{19, "R5_w=pkt(id=2,off=0,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
{20, "R4=pkt(id=2,off=4,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc)"},
{23, "R5=pkt(id=2,off=0,r=4,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
},
},
{
@ -448,18 +448,18 @@ static struct bpf_align_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.matches = {
{3, "R5_w=pkt_end(id=0,off=0,imm=0)"},
{3, "R5_w=pkt_end(off=0,imm=0)"},
/* (ptr - ptr) << 2 == unknown, (4n) */
{5, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
{5, "R5_w=scalar(smax=9223372036854775804,umax=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
/* (4n) + 14 == (4n+2). We blow our bounds, because
* the add could overflow.
*/
{6, "R5_w=inv(id=0,smin_value=-9223372036854775806,smax_value=9223372036854775806,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
{6, "R5_w=scalar(smin=-9223372036854775806,smax=9223372036854775806,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>=0 */
{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
{9, "R5=scalar(umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
/* packet pointer + nonnegative (4n+2) */
{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
{12, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
{11, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
{12, "R4_w=pkt(id=1,off=4,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
* We checked the bounds, but it might have been able
* to overflow if the packet pointer started in the
@ -467,7 +467,7 @@ static struct bpf_align_test tests[] = {
* So we did not get a 'range' on R6, and the access
* attempt will fail.
*/
{15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
{15, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
}
},
{
@ -502,23 +502,23 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{6, "R2_w=pkt(off=0,r=8,imm=0)"},
{8, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Adding 14 makes R6 be (4n+2) */
{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
{9, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
/* New unknown value in R7 is (4n) */
{10, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
{10, "R7_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
/* Subtracting it from R6 blows our unsigned bounds */
{11, "R6=inv(id=0,smin_value=-1006,smax_value=1034,umin_value=2,umax_value=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
{11, "R6=scalar(smin=-1006,smax=1034,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
/* Checked s>= 0 */
{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
{14, "R6=scalar(umin=2,umax=1034,var_off=(0x2; 0x7fc))"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc)"},
{20, "R5=pkt(id=2,off=0,r=4,umin=2,umax=1034,var_off=(0x2; 0x7fc)"},
},
},
@ -556,23 +556,23 @@ static struct bpf_align_test tests[] = {
/* Calculated offset in R6 has unknown value, but known
* alignment of 4.
*/
{6, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
{9, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
{6, "R2_w=pkt(off=0,r=8,imm=0)"},
{9, "R6_w=scalar(umax=60,var_off=(0x0; 0x3c))"},
/* Adding 14 makes R6 be (4n+2) */
{10, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
{10, "R6_w=scalar(umin=14,umax=74,var_off=(0x2; 0x7c))"},
/* Subtracting from packet pointer overflows ubounds */
{13, "R5_w=pkt(id=2,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
{13, "R5_w=pkt(id=2,off=0,r=8,umin=18446744073709551542,umax=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
/* New unknown value in R7 is (4n), >= 76 */
{14, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
{14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"},
/* Adding it to packet pointer gives nice bounds again */
{16, "R5_w=pkt(id=3,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
{16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
/* At the time the word size load is performed from R5,
* its total fixed offset is NET_IP_ALIGN + reg->off (0)
* which is 2. Then the variable offset is (4n+2), so
* the total offset is 4-byte aligned and meets the
* load's requirements.
*/
{20, "R5=pkt(id=3,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0xfffffffc)"},
{20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
},
},
};
@ -648,8 +648,8 @@ static int do_test_single(struct bpf_align_test *test)
/* Check the next line as well in case the previous line
* did not have a corresponding bpf insn. Example:
* func#0 @0
* 0: R1=ctx(id=0,off=0,imm=0) R10=fp0
* 0: (b7) r3 = 2 ; R3_w=inv2
* 0: R1=ctx(off=0,imm=0) R10=fp0
* 0: (b7) r3 = 2 ; R3_w=2
*/
if (!strstr(line_ptr, m.match)) {
cur_line = -1;
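
The match strings above use the verifier's "(value; mask)" var_off notation: bits set in the mask are unknown, bits set in value are known to be one. Below is a minimal userspace sketch (a simplification for illustration only, not the kernel's tnum implementation) of why an unknown byte shifted left by 2 is reported as var_off=(0x0; 0x3fc), i.e. unknown but 4-byte aligned:

/* Userspace sketch of the verifier's (value; mask) tracked-number notation
 * seen in the matches above; illustration only, not the kernel code.
 */
#include <stdint.h>
#include <stdio.h>

struct tnum {
	uint64_t value;	/* bits known to be set */
	uint64_t mask;	/* bits whose value is unknown */
};

static struct tnum tnum_lshift(struct tnum a, unsigned int shift)
{
	return (struct tnum){ a.value << shift, a.mask << shift };
}

int main(void)
{
	struct tnum byte = { 0x0, 0xff };	/* e.g. a byte loaded from the packet */
	struct tnum r6 = tnum_lshift(byte, 2);	/* "shifted left by 2" */

	/* Prints (0; 0x3fc): the low two bits stay known-zero, so any value
	 * this register can hold is a multiple of 4.
	 */
	printf("var_off=(%#llx; %#llx)\n",
	       (unsigned long long)r6.value, (unsigned long long)r6.mask);
	return 0;
}
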

View File

@ -7,19 +7,15 @@
static void test_add(struct atomics_lskel *skel)
{
int err, prog_fd;
int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
link_fd = atomics_lskel__add__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(add)"))
return;
/* No need to attach it, just run it directly */
prog_fd = skel->progs.add.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
goto cleanup;
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
goto cleanup;
return;
ASSERT_EQ(skel->data->add64_value, 3, "add64_value");
ASSERT_EQ(skel->bss->add64_result, 1, "add64_result");
@ -31,27 +27,20 @@ static void test_add(struct atomics_lskel *skel)
ASSERT_EQ(skel->bss->add_stack_result, 1, "add_stack_result");
ASSERT_EQ(skel->data->add_noreturn_value, 3, "add_noreturn_value");
cleanup:
close(link_fd);
}
static void test_sub(struct atomics_lskel *skel)
{
int err, prog_fd;
int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
link_fd = atomics_lskel__sub__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(sub)"))
return;
/* No need to attach it, just run it directly */
prog_fd = skel->progs.sub.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
goto cleanup;
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
goto cleanup;
return;
ASSERT_EQ(skel->data->sub64_value, -1, "sub64_value");
ASSERT_EQ(skel->bss->sub64_result, 1, "sub64_result");
@ -63,27 +52,20 @@ static void test_sub(struct atomics_lskel *skel)
ASSERT_EQ(skel->bss->sub_stack_result, 1, "sub_stack_result");
ASSERT_EQ(skel->data->sub_noreturn_value, -1, "sub_noreturn_value");
cleanup:
close(link_fd);
}
static void test_and(struct atomics_lskel *skel)
{
int err, prog_fd;
int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
link_fd = atomics_lskel__and__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(and)"))
return;
/* No need to attach it, just run it directly */
prog_fd = skel->progs.and.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
goto cleanup;
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
goto cleanup;
return;
ASSERT_EQ(skel->data->and64_value, 0x010ull << 32, "and64_value");
ASSERT_EQ(skel->bss->and64_result, 0x110ull << 32, "and64_result");
@ -92,26 +74,20 @@ static void test_and(struct atomics_lskel *skel)
ASSERT_EQ(skel->bss->and32_result, 0x110, "and32_result");
ASSERT_EQ(skel->data->and_noreturn_value, 0x010ull << 32, "and_noreturn_value");
cleanup:
close(link_fd);
}
static void test_or(struct atomics_lskel *skel)
{
int err, prog_fd;
int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
link_fd = atomics_lskel__or__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(or)"))
return;
/* No need to attach it, just run it directly */
prog_fd = skel->progs.or.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
goto cleanup;
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
goto cleanup;
return;
ASSERT_EQ(skel->data->or64_value, 0x111ull << 32, "or64_value");
ASSERT_EQ(skel->bss->or64_result, 0x110ull << 32, "or64_result");
@ -120,26 +96,20 @@ static void test_or(struct atomics_lskel *skel)
ASSERT_EQ(skel->bss->or32_result, 0x110, "or32_result");
ASSERT_EQ(skel->data->or_noreturn_value, 0x111ull << 32, "or_noreturn_value");
cleanup:
close(link_fd);
}
static void test_xor(struct atomics_lskel *skel)
{
int err, prog_fd;
int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
link_fd = atomics_lskel__xor__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(xor)"))
return;
/* No need to attach it, just run it directly */
prog_fd = skel->progs.xor.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
goto cleanup;
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
goto cleanup;
return;
ASSERT_EQ(skel->data->xor64_value, 0x101ull << 32, "xor64_value");
ASSERT_EQ(skel->bss->xor64_result, 0x110ull << 32, "xor64_result");
@ -148,26 +118,20 @@ static void test_xor(struct atomics_lskel *skel)
ASSERT_EQ(skel->bss->xor32_result, 0x110, "xor32_result");
ASSERT_EQ(skel->data->xor_noreturn_value, 0x101ull << 32, "xor_nxoreturn_value");
cleanup:
close(link_fd);
}
static void test_cmpxchg(struct atomics_lskel *skel)
{
int err, prog_fd;
int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
link_fd = atomics_lskel__cmpxchg__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(cmpxchg)"))
return;
/* No need to attach it, just run it directly */
prog_fd = skel->progs.cmpxchg.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
goto cleanup;
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
goto cleanup;
return;
ASSERT_EQ(skel->data->cmpxchg64_value, 2, "cmpxchg64_value");
ASSERT_EQ(skel->bss->cmpxchg64_result_fail, 1, "cmpxchg_result_fail");
@ -176,45 +140,34 @@ static void test_cmpxchg(struct atomics_lskel *skel)
ASSERT_EQ(skel->data->cmpxchg32_value, 2, "lcmpxchg32_value");
ASSERT_EQ(skel->bss->cmpxchg32_result_fail, 1, "cmpxchg_result_fail");
ASSERT_EQ(skel->bss->cmpxchg32_result_succeed, 1, "cmpxchg_result_succeed");
cleanup:
close(link_fd);
}
static void test_xchg(struct atomics_lskel *skel)
{
int err, prog_fd;
int link_fd;
LIBBPF_OPTS(bpf_test_run_opts, topts);
link_fd = atomics_lskel__xchg__attach(skel);
if (!ASSERT_GT(link_fd, 0, "attach(xchg)"))
return;
/* No need to attach it, just run it directly */
prog_fd = skel->progs.xchg.prog_fd;
err = bpf_prog_test_run_opts(prog_fd, &topts);
if (!ASSERT_OK(err, "test_run_opts err"))
goto cleanup;
return;
if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
goto cleanup;
return;
ASSERT_EQ(skel->data->xchg64_value, 2, "xchg64_value");
ASSERT_EQ(skel->bss->xchg64_result, 1, "xchg64_result");
ASSERT_EQ(skel->data->xchg32_value, 2, "xchg32_value");
ASSERT_EQ(skel->bss->xchg32_result, 1, "xchg32_result");
cleanup:
close(link_fd);
}
void test_atomics(void)
{
struct atomics_lskel *skel;
__u32 duration = 0;
skel = atomics_lskel__open_and_load();
if (CHECK(!skel, "skel_load", "atomics skeleton failed\n"))
if (!ASSERT_OK_PTR(skel, "atomics skeleton load"))
return;
if (skel->data->skip_tests) {
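
The rewritten helpers above no longer attach the programs at all: with the light skeleton, each program's fd is available as skel->progs.<name>.prog_fd and can be run once through BPF_PROG_RUN. A minimal sketch of that pattern, assuming only a prog fd taken from such a skeleton:

#include <stdio.h>
#include <bpf/bpf.h>

/* Run a loaded BPF program once without attaching it. prog_fd would come
 * from a light skeleton, e.g. skel->progs.add.prog_fd as in the test above.
 */
static int run_prog_once(int prog_fd)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	int err = bpf_prog_test_run_opts(prog_fd, &topts);

	if (err) {
		fprintf(stderr, "bpf_prog_test_run_opts: %d\n", err);
		return err;
	}
	return topts.retval;	/* the program's return value */
}
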

View File

@ -148,22 +148,38 @@ static void test_btf_dump_incremental(void)
/* First, generate BTF corresponding to the following C code:
*
* enum { VAL = 1 };
* enum x;
*
* enum x { X = 1 };
*
* enum { Y = 1 };
*
* struct s;
*
* struct s { int x; };
*
*/
id = btf__add_enum(btf, "x", 4);
ASSERT_EQ(id, 1, "enum_declaration_id");
id = btf__add_enum(btf, "x", 4);
ASSERT_EQ(id, 2, "named_enum_id");
err = btf__add_enum_value(btf, "X", 1);
ASSERT_OK(err, "named_enum_val_ok");
id = btf__add_enum(btf, NULL, 4);
ASSERT_EQ(id, 1, "enum_id");
err = btf__add_enum_value(btf, "VAL", 1);
ASSERT_OK(err, "enum_val_ok");
ASSERT_EQ(id, 3, "anon_enum_id");
err = btf__add_enum_value(btf, "Y", 1);
ASSERT_OK(err, "anon_enum_val_ok");
id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);
ASSERT_EQ(id, 2, "int_id");
ASSERT_EQ(id, 4, "int_id");
id = btf__add_fwd(btf, "s", BTF_FWD_STRUCT);
ASSERT_EQ(id, 5, "fwd_id");
id = btf__add_struct(btf, "s", 4);
ASSERT_EQ(id, 3, "struct_id");
err = btf__add_field(btf, "x", 2, 0, 0);
ASSERT_EQ(id, 6, "struct_id");
err = btf__add_field(btf, "x", 4, 0, 0);
ASSERT_OK(err, "field_ok");
for (i = 1; i < btf__type_cnt(btf); i++) {
@ -173,11 +189,20 @@ static void test_btf_dump_incremental(void)
fflush(dump_buf_file);
dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */
ASSERT_STREQ(dump_buf,
"enum {\n"
" VAL = 1,\n"
"enum x;\n"
"\n"
"enum x {\n"
" X = 1,\n"
"};\n"
"\n"
"enum {\n"
" Y = 1,\n"
"};\n"
"\n"
"struct s;\n"
"\n"
"struct s {\n"
" int x;\n"
"};\n\n", "c_dump1");
@ -199,10 +224,12 @@ static void test_btf_dump_incremental(void)
fseek(dump_buf_file, 0, SEEK_SET);
id = btf__add_struct(btf, "s", 4);
ASSERT_EQ(id, 4, "struct_id");
err = btf__add_field(btf, "x", 1, 0, 0);
ASSERT_EQ(id, 7, "struct_id");
err = btf__add_field(btf, "x", 2, 0, 0);
ASSERT_OK(err, "field_ok");
err = btf__add_field(btf, "s", 3, 32, 0);
err = btf__add_field(btf, "y", 3, 32, 0);
ASSERT_OK(err, "field_ok");
err = btf__add_field(btf, "s", 6, 64, 0);
ASSERT_OK(err, "field_ok");
for (i = 1; i < btf__type_cnt(btf); i++) {
@ -214,9 +241,10 @@ static void test_btf_dump_incremental(void)
dump_buf[dump_buf_sz] = 0; /* some libc implementations don't do this */
ASSERT_STREQ(dump_buf,
"struct s___2 {\n"
" enum x x;\n"
" enum {\n"
" VAL___2 = 1,\n"
" } x;\n"
" Y___2 = 1,\n"
" } y;\n"
" struct s s;\n"
"};\n\n" , "c_dump1");

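The reworked test emits forward declarations before the definitions of the same names ("enum x;" before "enum x { ... };", "struct s;" before "struct s { int x; };"), which is the conflicting-name case the libbpf fix addresses. A minimal sketch of the same construction with libbpf's BTF writer API; type ids are 1-based in insertion order, as in the assertions above:

#include <bpf/btf.h>

/* Sketch: forward declaration first, definition with the same name after. */
static struct btf *build_fwd_then_def(void)
{
	struct btf *btf = btf__new_empty();
	int int_id;

	if (!btf)
		return NULL;

	int_id = btf__add_int(btf, "int", 4, BTF_INT_SIGNED);	/* id 1 */
	btf__add_fwd(btf, "s", BTF_FWD_STRUCT);			/* id 2: struct s;     */
	btf__add_struct(btf, "s", 4);				/* id 3: struct s {    */
	btf__add_field(btf, "x", int_id, 0, 0);			/*          int x; };  */

	return btf;
}
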
View File

@ -512,7 +512,7 @@ static int __trigger_module_test_read(const struct core_reloc_test_case *test)
}
static struct core_reloc_test_case test_cases[] = {
static const struct core_reloc_test_case test_cases[] = {
/* validate we can find kernel image and use its BTF for relocs */
{
.case_name = "kernel",
@ -843,7 +843,7 @@ static int run_btfgen(const char *src_btf, const char *dst_btf, const char *objp
int n;
n = snprintf(command, sizeof(command),
"./tools/build/bpftool/bpftool gen min_core_btf %s %s %s",
"./bpftool gen min_core_btf %s %s %s",
src_btf, dst_btf, objpath);
if (n < 0 || n >= sizeof(command))
return -1;
@ -855,7 +855,7 @@ static void run_core_reloc_tests(bool use_btfgen)
{
const size_t mmap_sz = roundup_page(sizeof(struct data));
DECLARE_LIBBPF_OPTS(bpf_object_open_opts, open_opts);
struct core_reloc_test_case *test_case;
struct core_reloc_test_case *test_case, test_case_copy;
const char *tp_name, *probe_name;
int err, i, equal, fd;
struct bpf_link *link = NULL;
@ -870,7 +870,10 @@ static void run_core_reloc_tests(bool use_btfgen)
for (i = 0; i < ARRAY_SIZE(test_cases); i++) {
char btf_file[] = "/tmp/core_reloc.btf.XXXXXX";
test_case = &test_cases[i];
test_case_copy = test_cases[i];
test_case = &test_case_copy;
if (!test__start_subtest(test_case->case_name))
continue;
@ -881,6 +884,7 @@ static void run_core_reloc_tests(bool use_btfgen)
/* generate a "minimal" BTF file and use it as source */
if (use_btfgen) {
if (!test_case->btf_src_file || test_case->fails) {
test__skip();
continue;
@ -989,7 +993,8 @@ cleanup:
CHECK_FAIL(munmap(mmap_data, mmap_sz));
mmap_data = NULL;
}
remove(btf_file);
if (use_btfgen)
remove(test_case->btf_src_file);
bpf_link__destroy(link);
link = NULL;
bpf_object__close(obj);
@ -1001,7 +1006,7 @@ void test_core_reloc(void)
run_core_reloc_tests(false);
}
void test_core_btfgen(void)
void test_core_reloc_btfgen(void)
{
run_core_reloc_tests(true);
}

View File

@ -78,7 +78,7 @@ static void obj_load_log_buf(void)
ASSERT_OK_PTR(strstr(libbpf_log_buf, "prog 'bad_prog': BPF program load failed"),
"libbpf_log_not_empty");
ASSERT_OK_PTR(strstr(obj_log_buf, "DATASEC license"), "obj_log_not_empty");
ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"),
ASSERT_OK_PTR(strstr(good_log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"),
"good_log_verbose");
ASSERT_OK_PTR(strstr(bad_log_buf, "invalid access to map value, value_size=16 off=16000 size=4"),
"bad_log_not_empty");
@ -175,7 +175,7 @@ static void bpf_prog_load_log_buf(void)
opts.log_level = 2;
fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
good_prog_insns, good_prog_insn_cnt, &opts);
ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(id=0,off=0,imm=0) R10=fp0"), "good_log_2");
ASSERT_OK_PTR(strstr(log_buf, "0: R1=ctx(off=0,imm=0) R10=fp0"), "good_log_2");
ASSERT_GE(fd, 0, "good_fd2");
if (fd >= 0)
close(fd);
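
Both hunks only update the expected first register-state line of the verifier log (the always-zero "id=0" field is no longer printed for ctx pointers). For reference, a minimal sketch of requesting such a log via bpf_prog_load() opts, mirroring the call in the hunk above; the two-instruction "return 0" program here is an assumed stand-in for good_prog_insns:

#include <stddef.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

/* Load a trivial "r0 = 0; exit" socket filter with a verbose verifier log. */
static int load_with_verifier_log(char *log_buf, size_t log_sz)
{
	const struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_buf = log_buf,
		.log_size = log_sz,
		.log_level = 2,		/* verbose: include register state lines */
	);

	/* On success, log_buf contains lines like "0: R1=ctx(off=0,imm=0) R10=fp0". */
	return bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "good_prog", "GPL",
			     insns, sizeof(insns) / sizeof(insns[0]), &opts);
}
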

View File

@ -20,8 +20,8 @@ __u64 add_stack_value_copy = 0;
__u64 add_stack_result = 0;
__u64 add_noreturn_value = 1;
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(add, int a)
SEC("raw_tp/sys_enter")
int add(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
@ -46,8 +46,8 @@ __s64 sub_stack_value_copy = 0;
__s64 sub_stack_result = 0;
__s64 sub_noreturn_value = 1;
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(sub, int a)
SEC("raw_tp/sys_enter")
int sub(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
@ -70,8 +70,8 @@ __u32 and32_value = 0x110;
__u32 and32_result = 0;
__u64 and_noreturn_value = (0x110ull << 32);
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(and, int a)
SEC("raw_tp/sys_enter")
int and(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
@ -91,8 +91,8 @@ __u32 or32_value = 0x110;
__u32 or32_result = 0;
__u64 or_noreturn_value = (0x110ull << 32);
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(or, int a)
SEC("raw_tp/sys_enter")
int or(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
@ -111,8 +111,8 @@ __u32 xor32_value = 0x110;
__u32 xor32_result = 0;
__u64 xor_noreturn_value = (0x110ull << 32);
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(xor, int a)
SEC("raw_tp/sys_enter")
int xor(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
@ -132,8 +132,8 @@ __u32 cmpxchg32_value = 1;
__u32 cmpxchg32_result_fail = 0;
__u32 cmpxchg32_result_succeed = 0;
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(cmpxchg, int a)
SEC("raw_tp/sys_enter")
int cmpxchg(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
@ -153,8 +153,8 @@ __u64 xchg64_result = 0;
__u32 xchg32_value = 1;
__u32 xchg32_result = 0;
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(xchg, int a)
SEC("raw_tp/sys_enter")
int xchg(const void *ctx)
{
if (pid != (bpf_get_current_pid_tgid() >> 32))
return 0;
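
On the BPF side the programs switch from fentry (BTF-typed arguments via BPF_PROG, attachment required) to raw tracepoints, which take a plain context pointer and can be driven directly by BPF_PROG_RUN as shown in the userspace changes. A minimal sketch of the new shape, assuming the same pid-filter convention as above; the atomic builtin needs the -mcpu settings the selftests already use:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

__u32 pid = 0;		/* set from userspace before the test run */
__u64 counter = 0;

SEC("raw_tp/sys_enter")
int count_enter(const void *ctx)
{
	if (pid != (bpf_get_current_pid_tgid() >> 32))
		return 0;
	__sync_fetch_and_add(&counter, 1);	/* atomic add, like the tests above */
	return 0;
}

char _license[] SEC("license") = "GPL";
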

View File

@ -1,6 +1,9 @@
/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
#include <iostream>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#include <bpf/libbpf.h>
#pragma GCC diagnostic pop
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include "test_core_extern.skel.h"

View File

@ -1,6 +1,6 @@
#define __INVALID_ATOMIC_ACCESS_TEST(op) \
#define __INVALID_ATOMIC_ACCESS_TEST(op) \
{ \
"atomic " #op " access through non-pointer ", \
"atomic " #op " access through non-pointer ", \
.insns = { \
BPF_MOV64_IMM(BPF_REG_0, 1), \
BPF_MOV64_IMM(BPF_REG_1, 0), \
@ -9,7 +9,7 @@
BPF_EXIT_INSN(), \
}, \
.result = REJECT, \
.errstr = "R1 invalid mem access 'inv'" \
.errstr = "R1 invalid mem access 'scalar'" \
}
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD),
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH),

View File

@ -508,7 +508,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'inv'",
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT
},
@ -530,7 +530,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, -1),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'inv'",
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT
},

View File

@ -96,6 +96,25 @@
{ "bpf_kfunc_call_test_mem_len_fail1", 2 },
},
},
{
"calls: trigger reg2btf_ids[reg->type] for reg->type > __BPF_REG_TYPE_MAX",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "arg#0 pointer type STRUCT prog_test_ref_kfunc must point",
.fixup_kfunc_btf_id = {
{ "bpf_kfunc_call_test_acquire", 3 },
{ "bpf_kfunc_call_test_release", 5 },
},
},
{
"calls: basic sanity",
.insns = {
@ -169,7 +188,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "R0 invalid mem access 'inv'",
.errstr = "R0 invalid mem access 'scalar'",
},
{
"calls: multiple ret types in subprog 2",
@ -472,7 +491,7 @@
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R6 invalid mem access 'inv'",
.errstr = "R6 invalid mem access 'scalar'",
.prog_type = BPF_PROG_TYPE_XDP,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
@ -1678,7 +1697,7 @@
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.fixup_map_hash_8b = { 12, 22 },
.result = REJECT,
.errstr = "R0 invalid mem access 'inv'",
.errstr = "R0 invalid mem access 'scalar'",
},
{
"calls: pkt_ptr spill into caller stack",

View File

@ -127,7 +127,7 @@
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG,
.result = REJECT,
.errstr = "R1 type=inv expected=ctx",
.errstr = "R1 type=scalar expected=ctx",
},
{
"pass ctx or null check, 4: ctx - const",
@ -193,5 +193,5 @@
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
.expected_attach_type = BPF_CGROUP_INET4_POST_BIND,
.result = REJECT,
.errstr = "R1 type=inv expected=ctx",
.errstr = "R1 type=scalar expected=ctx",
},

View File

@ -339,7 +339,7 @@
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R2 invalid mem access 'inv'",
.errstr = "R2 invalid mem access 'scalar'",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,

View File

@ -350,7 +350,7 @@
BPF_EMIT_CALL(BPF_FUNC_csum_diff),
BPF_EXIT_INSN(),
},
.errstr = "R1 type=inv expected=fp",
.errstr = "R1 type=scalar expected=fp",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
@ -471,7 +471,7 @@
BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
BPF_EXIT_INSN(),
},
.errstr = "R1 type=inv expected=fp",
.errstr = "R1 type=scalar expected=fp",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
@ -484,7 +484,7 @@
BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel),
BPF_EXIT_INSN(),
},
.errstr = "R1 type=inv expected=fp",
.errstr = "R1 type=scalar expected=fp",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},

View File

@ -286,7 +286,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'inv'",
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@ -356,7 +356,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'inv'",
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@ -426,7 +426,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'inv'",
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@ -496,7 +496,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'inv'",
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@ -566,7 +566,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'inv'",
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@ -636,7 +636,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'inv'",
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@ -706,7 +706,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'inv'",
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
@ -776,7 +776,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R0 invalid mem access 'inv'",
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,

View File

@ -27,7 +27,7 @@
BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=inv(umin=1, umax=8) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),
@ -87,7 +87,7 @@
BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=inv(umin=1, umax=8) */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=scalar(umin=1, umax=8) */
BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
BPF_MOV64_IMM(BPF_REG_3, 0),

View File

@ -132,7 +132,7 @@
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R0 invalid mem access 'inv'",
.errstr = "R0 invalid mem access 'scalar'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
@ -162,7 +162,7 @@
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R3 invalid mem access 'inv'",
.errstr = "R3 invalid mem access 'scalar'",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},

View File

@ -162,7 +162,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "type=inv expected=sock",
.errstr = "type=scalar expected=sock",
.result = REJECT,
},
{
@ -178,7 +178,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "type=inv expected=sock",
.errstr = "type=scalar expected=sock",
.result = REJECT,
},
{
@ -274,7 +274,7 @@
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.errstr = "type=inv expected=sock",
.errstr = "type=scalar expected=sock",
.result = REJECT,
},
{

View File

@ -104,7 +104,7 @@
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 3 },
.errstr = "R6 invalid mem access 'inv'",
.errstr = "R6 invalid mem access 'scalar'",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},

View File

@ -502,7 +502,7 @@
.fixup_sk_storage_map = { 11 },
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "R3 type=inv expected=fp",
.errstr = "R3 type=scalar expected=fp",
},
{
"sk_storage_get(map, skb->sk, &stack_value, 1): stack_value",

View File

@ -102,7 +102,7 @@
BPF_EXIT_INSN(),
},
.errstr_unpriv = "attempt to corrupt spilled",
.errstr = "R0 invalid mem access 'inv",
.errstr = "R0 invalid mem access 'scalar'",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
@ -147,11 +147,11 @@
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv20 */
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=inv20 */
/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=inv20 */
/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@ -190,11 +190,11 @@
BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@ -222,11 +222,11 @@
BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -8),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@ -250,11 +250,11 @@
BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_10, -6),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=65535 */
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv,umax=65535 */
/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=inv20 */
/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=20 */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@ -280,11 +280,11 @@
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -4),
/* r0 = r2 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=inv,umax=U32_MAX */
/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_4),
/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=inv */
/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4= */
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
@ -305,13 +305,13 @@
BPF_JMP_IMM(BPF_JLE, BPF_REG_4, 40, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
/* *(u32 *)(r10 -8) = r4 R4=inv,umax=40 */
/* *(u32 *)(r10 -8) = r4 R4=umax=40 */
BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_4, -8),
/* r4 = (*u32 *)(r10 - 8) */
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_10, -8),
/* r2 += r4 R2=pkt R4=inv,umax=40 */
/* r2 += r4 R2=pkt R4=umax=40 */
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_4),
/* r0 = r2 R2=pkt,umax=40 R4=inv,umax=40 */
/* r0 = r2 R2=pkt,umax=40 R4=umax=40 */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 20),
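
The raw instruction sequences above replay, at the bytecode level, the usual packet bounds check a C program performs before a variable-offset read. A minimal C sketch of the equivalent pattern (a classic TC classifier; the names and the masked offset are illustrative only):

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

/* Equivalent of the "add offset, compare against pkt_end, then load"
 * sequences above: the variable offset is only dereferenced after the
 * comparison against data_end.
 */
SEC("tc")
int bounded_read(struct __sk_buff *skb)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	__u32 off = skb->len & 0xfc;	/* unknown to the verifier, 4-aligned, umax=252 */
	void *p = data + off;

	if (p + 4 > data_end)		/* the "if (r0 > r3)" check */
		return TC_ACT_OK;
	return *(__u32 *)p ? TC_ACT_OK : TC_ACT_SHOT;
}

char _license[] SEC("license") = "GPL";
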

View File

@ -214,7 +214,7 @@
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R1 type=inv expected=ctx",
.errstr = "R1 type=scalar expected=ctx",
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
@ -420,7 +420,7 @@
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "R7 invalid mem access 'inv'",
.errstr_unpriv = "R7 invalid mem access 'scalar'",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 0,

View File

@ -64,7 +64,7 @@
},
.fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 pointer arithmetic prohibited",
.errstr = "invalid mem access 'inv'",
.errstr = "invalid mem access 'scalar'",
.result = REJECT,
.result_unpriv = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
@ -89,7 +89,7 @@
},
.fixup_map_hash_48b = { 3 },
.errstr_unpriv = "leaking pointer from stack off -8",
.errstr = "R0 invalid mem access 'inv'",
.errstr = "R0 invalid mem access 'scalar'",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},

View File

@ -397,7 +397,7 @@
.fixup_map_array_48b = { 1 },
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "R0 invalid mem access 'inv'",
.errstr_unpriv = "R0 invalid mem access 'scalar'",
.retval = 0,
},
{
@ -1074,7 +1074,7 @@
},
.fixup_map_array_48b = { 3 },
.result = REJECT,
.errstr = "R0 invalid mem access 'inv'",
.errstr = "R0 invalid mem access 'scalar'",
.errstr_unpriv = "R0 pointer -= pointer prohibited",
},
{

View File

@ -131,7 +131,7 @@
* write might have overwritten the spilled pointer (i.e. we lose track
* of the spilled register when we analyze the write).
*/
.errstr = "R2 invalid mem access 'inv'",
.errstr = "R2 invalid mem access 'scalar'",
.result = REJECT,
},
{