Merge branch 'bpf: Improve verifier for cond_op and spilled loop index variables'

Yonghong Song says:

====================

LLVM commit [1] introduced a hoistMinMax optimization that transforms
  (i < VIRTIO_MAX_SGS) && (i < out_sgs)
to
  upper = MIN(VIRTIO_MAX_SGS, out_sgs)
  ... i < upper ...
and caused a verification failure. Commit [2] worked around the issue by
adding some bpf assembly code to prohibit the above optimization.
This patch set improves the verifier so that verification succeeds without
the above workaround.
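
(Illustration only, not the actual selftest source; the VIRTIO_MAX_SGS value, the MIN()
macro and walk_one() below are placeholders.) A rough C sketch of the loop shape before
and after the optimization:

  #define VIRTIO_MAX_SGS  6       /* placeholder value */
  #define MIN(a, b)       ((a) < (b) ? (a) : (b))

  static void walk_one(unsigned int i) { (void)i; }     /* stand-in loop body */

  /* Before hoistMinMax: 'i' is compared against the constant VIRTIO_MAX_SGS
   * on every iteration, which the verifier already handles well.
   */
  static void loop_before(unsigned int out_sgs)
  {
          unsigned int i;

          for (i = 0; (i < VIRTIO_MAX_SGS) && (i < out_sgs); i++)
                  walk_one(i);
  }

  /* After hoistMinMax: the exit test becomes "i < upper" where 'upper' is
   * bounded but not constant, while 'i' is a known constant in each verifier
   * state, i.e. the '<const> <cond_op> <non_const>' pattern discussed below.
   */
  static void loop_after(unsigned int out_sgs)
  {
          unsigned int upper = MIN(VIRTIO_MAX_SGS, out_sgs);
          unsigned int i;

          for (i = 0; i < upper; i++)
                  walk_one(i);
  }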

Without [2], the current verifier will hit the following failures:
  ...
  119: (15) if r1 == 0x0 goto pc+1
  The sequence of 8193 jumps is too complex.
  verification time 525829 usec
  stack depth 64
  processed 156616 insns (limit 1000000) max_states_per_insn 8 total_states 1754 peak_states 1712 mark_read 12
  -- END PROG LOAD LOG --
  libbpf: prog 'trace_virtqueue_add_sgs': failed to load: -14
  libbpf: failed to load object 'loop6.bpf.o'
  ...
The failure is due to the verifier inadequately handling '<const> <cond_op> <non_const>':
both branch directions are explored, generating the following verification states:
  ...
  89: (07) r2 += 1                      ; R2_w=5
  90: (79) r8 = *(u64 *)(r10 -48)       ; R8_w=scalar() R10=fp0
  91: (79) r1 = *(u64 *)(r10 -56)       ; R1_w=scalar(umax=5,var_off=(0x0; 0x7)) R10=fp0
  92: (ad) if r2 < r1 goto pc+41        ; R0_w=scalar() R1_w=scalar(umin=6,umax=5,var_off=(0x4; 0x3))
      R2_w=5 R6_w=scalar(id=385) R7_w=0 R8_w=scalar() R9_w=scalar(umax=21474836475,var_off=(0x0; 0x7ffffffff))
      R10=fp0 fp-8=mmmmmmmm fp-16=mmmmmmmm fp-24=mmmm???? fp-32= fp-40_w=4 fp-48=mmmmmmmm fp-56= fp-64=mmmmmmmm
  ...
  89: (07) r2 += 1                      ; R2_w=6
  90: (79) r8 = *(u64 *)(r10 -48)       ; R8_w=scalar() R10=fp0
  91: (79) r1 = *(u64 *)(r10 -56)       ; R1_w=scalar(umax=5,var_off=(0x0; 0x7)) R10=fp0
  92: (ad) if r2 < r1 goto pc+41        ; R0_w=scalar() R1_w=scalar(umin=7,umax=5,var_off=(0x4; 0x3))
      R2_w=6 R6=scalar(id=388) R7=0 R8_w=scalar() R9_w=scalar(umax=25769803770,var_off=(0x0; 0x7ffffffff))
      R10=fp0 fp-8=mmmmmmmm fp-16=mmmmmmmm fp-24=mmmm???? fp-32= fp-40=5 fp-48=mmmmmmmm fp-56= fp-64=mmmmmmmm
    ...
  89: (07) r2 += 1                      ; R2_w=4088
  90: (79) r8 = *(u64 *)(r10 -48)       ; R8_w=scalar() R10=fp0
  91: (79) r1 = *(u64 *)(r10 -56)       ; R1_w=scalar(umax=5,var_off=(0x0; 0x7)) R10=fp0
  92: (ad) if r2 < r1 goto pc+41        ; R0=scalar() R1=scalar(umin=4089,umax=5,var_off=(0x0; 0x7))
      R2=4088 R6=scalar(id=12634) R7=0 R8=scalar() R9=scalar(umax=17557826301960,var_off=(0x0; 0xfffffffffff))
      R10=fp0 fp-8=mmmmmmmm fp-16=mmmmmmmm fp-24=mmmm???? fp-32= fp-40=4087 fp-48=mmmmmmmm fp-56= fp-64=mmmmmmmm

Patch 3 fixed the above issue by handling '<const> <cond_op> <non_const>' properly.
While developing selftests for Patch 3, I found some issues with bound deduction for
BPF_EQ/BPF_NE and fixed them in Patch 1.
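
The core idea can be sketched outside the kernel as follows (illustration only, not the
kernel implementation; the helper names and the unsigned-64-bit-only bounds model are
made up): a '<const> <cond_op> <non_const>' test is answered by flipping the comparison
and reusing the existing register-vs-constant bounds check, which is what flip_opcode()
plus is_branch_taken() do in the verifier change below.

  #include <stdint.h>

  /* Simplified unsigned 64-bit model of branch-taken analysis:
   * returns 1 if the branch is always taken, 0 if never taken, -1 if unknown.
   * Only '<' and '>' are modeled here.
   */
  static int reg_op_const_taken(uint64_t umin, uint64_t umax, char op, uint64_t val)
  {
          switch (op) {
          case '<':                       /* reg < val */
                  if (umax < val)
                          return 1;
                  if (umin >= val)
                          return 0;
                  return -1;
          case '>':                       /* reg > val */
                  if (umin > val)
                          return 1;
                  if (umax <= val)
                          return 0;
                  return -1;
          default:
                  return -1;
          }
  }

  /* "<const> < <non_const>" is the same predicate as "<non_const> > <const>",
   * so the constant-on-the-left case is handled by flipping the operator.
   */
  static int const_op_reg_taken(uint64_t val, char op, uint64_t umin, uint64_t umax)
  {
          return reg_op_const_taken(umin, umax, op == '<' ? '>' : '<', val);
  }

With the bounds from the log above (r1 has umax=5), const_op_reg_taken(5, '<', 0, 5)
returns 0: once the constant counter reaches the bound's umax, the loop-continue branch
can be pruned instead of being explored for thousands of further counter values.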

After the above issue is fixed, a second issue shows up:
  ...
  67: (07) r1 += -16                    ; R1_w=fp-16
  ; bpf_probe_read_kernel(&sgp, sizeof(sgp), sgs + i);
  68: (b4) w2 = 8                       ; R2_w=8
  69: (85) call bpf_probe_read_kernel#113       ; R0_w=scalar() fp-16=mmmmmmmm
  ; return sgp;
  70: (79) r6 = *(u64 *)(r10 -16)       ; R6=scalar() R10=fp0
  ; for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX);
  71: (15) if r6 == 0x0 goto pc-49      ; R6=scalar()
  72: (b4) w1 = 0                       ; R1_w=0
  73: (05) goto pc-46
  ; for (i = 0; (i < VIRTIO_MAX_SGS) && (i < out_sgs); i++) {
  28: (bc) w7 = w1                      ; R1_w=0 R7_w=0
  ; bpf_probe_read_kernel(&len, sizeof(len), &sgp->length);
  ...
  23: (79) r3 = *(u64 *)(r10 -40)       ; R3_w=2 R10=fp0
  ; for (i = 0; (i < VIRTIO_MAX_SGS) && (i < out_sgs); i++) {
  24: (07) r3 += 1                      ; R3_w=3
  ; for (i = 0; (i < VIRTIO_MAX_SGS) && (i < out_sgs); i++) {
  25: (79) r1 = *(u64 *)(r10 -56)       ; R1_w=scalar(umax=5,var_off=(0x0; 0x7)) R10=fp0
  26: (ad) if r3 < r1 goto pc+34 61: R0=scalar() R1_w=scalar(umin=4,umax=5,var_off=(0x4; 0x1)) R3_w=3 R6=scalar(id=1658)
     R7=0 R8=scalar(id=1653) R9=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) R10=fp0 fp-8=mmmmmmmm fp-16=mmmmmmmm
     fp-24=mmmm???? fp-32= fp-40=2 fp-56= fp-64=mmmmmmmm
  ; if (sg_is_chain(&sg))
  61: (7b) *(u64 *)(r10 -40) = r3       ; R3_w=3 R10=fp0 fp-40_w=3
    ...
  67: (07) r1 += -16                    ; R1_w=fp-16
  ; bpf_probe_read_kernel(&sgp, sizeof(sgp), sgs + i);
  68: (b4) w2 = 8                       ; R2_w=8
  69: (85) call bpf_probe_read_kernel#113       ; R0_w=scalar() fp-16=mmmmmmmm
  ; return sgp;
  70: (79) r6 = *(u64 *)(r10 -16)
  ; for (n = 0, sgp = get_sgp(sgs, i); sgp && (n < SG_MAX);
  infinite loop detected at insn 71
  verification time 90800 usec
  stack depth 64
  processed 25017 insns (limit 1000000) max_states_per_insn 20 total_states 491 peak_states 169 mark_read 12
  -- END PROG LOAD LOG --
  libbpf: prog 'trace_virtqueue_add_sgs': failed to load: -22

Further analysis found that the index variable 'i' is spilled but not marked as precise,
which leads to the failure above. Handling this is trickier, since identifying an
induction variable in the verifier is not easy. Although a heuristic is possible, let us
leave it for now.

  [1] https://reviews.llvm.org/D143726
  [2] Commit 3c2611bac0 ("selftests/bpf: Fix trace_virtqueue_add_sgs test issue with LLVM 17.")
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
commit 4daf0b327f (Alexei Starovoitov, 2023-04-06 15:26:08 -07:00)
4 changed files with 662 additions and 1 deletion

kernel/bpf/verifier.c

@@ -12651,10 +12651,14 @@ static int is_branch32_taken(struct bpf_reg_state *reg, u32 val, u8 opcode)
        case BPF_JEQ:
                if (tnum_is_const(subreg))
                        return !!tnum_equals_const(subreg, val);
                else if (val < reg->u32_min_value || val > reg->u32_max_value)
                        return 0;
                break;
        case BPF_JNE:
                if (tnum_is_const(subreg))
                        return !tnum_equals_const(subreg, val);
                else if (val < reg->u32_min_value || val > reg->u32_max_value)
                        return 1;
                break;
        case BPF_JSET:
                if ((~subreg.mask & subreg.value) & val)
@@ -12724,10 +12728,14 @@ static int is_branch64_taken(struct bpf_reg_state *reg, u64 val, u8 opcode)
        case BPF_JEQ:
                if (tnum_is_const(reg->var_off))
                        return !!tnum_equals_const(reg->var_off, val);
                else if (val < reg->umin_value || val > reg->umax_value)
                        return 0;
                break;
        case BPF_JNE:
                if (tnum_is_const(reg->var_off))
                        return !tnum_equals_const(reg->var_off, val);
                else if (val < reg->umin_value || val > reg->umax_value)
                        return 1;
                break;
        case BPF_JSET:
                if ((~reg->var_off.mask & reg->var_off.value) & val)
@@ -13348,6 +13356,18 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
                                       src_reg->var_off.value,
                                       opcode,
                                       is_jmp32);
        } else if (dst_reg->type == SCALAR_VALUE &&
                   is_jmp32 && tnum_is_const(tnum_subreg(dst_reg->var_off))) {
                pred = is_branch_taken(src_reg,
                                       tnum_subreg(dst_reg->var_off).value,
                                       flip_opcode(opcode),
                                       is_jmp32);
        } else if (dst_reg->type == SCALAR_VALUE &&
                   !is_jmp32 && tnum_is_const(dst_reg->var_off)) {
                pred = is_branch_taken(src_reg,
                                       dst_reg->var_off.value,
                                       flip_opcode(opcode),
                                       is_jmp32);
        } else if (reg_is_pkt_pointer_any(dst_reg) &&
                   reg_is_pkt_pointer_any(src_reg) &&
                   !is_jmp32) {

tools/testing/selftests/bpf/prog_tests/verifier.c

@@ -7,6 +7,7 @@
#include "verifier_array_access.skel.h"
#include "verifier_basic_stack.skel.h"
#include "verifier_bounds_deduction.skel.h"
#include "verifier_bounds_deduction_non_const.skel.h"
#include "verifier_bounds_mix_sign_unsign.skel.h"
#include "verifier_cfg.skel.h"
#include "verifier_cgroup_inv_retcode.skel.h"
@@ -70,6 +71,7 @@ void test_verifier_and(void) { RUN(verifier_and); }
void test_verifier_array_access(void) { RUN(verifier_array_access); }
void test_verifier_basic_stack(void) { RUN(verifier_basic_stack); }
void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); }
void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); }
void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); }
void test_verifier_cfg(void) { RUN(verifier_cfg); }
void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); }

tools/testing/selftests/bpf/progs/verifier_bounds_deduction_non_const.c

@@ -0,0 +1,639 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <non_const> == <const>, 1")
__success __retval(0)
__naked void deducing_bounds_from_non_const_1(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 < 3 goto l0_%=; \
r2 = 2; \
if r0 == r2 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <non_const> == <const>, 2")
__success __retval(0)
__naked void deducing_bounds_from_non_const_2(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 > 3 goto l0_%=; \
r2 = 4; \
if r0 == r2 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <non_const> != <const>, 1")
__success __retval(0)
__naked void deducing_bounds_from_non_const_3(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 < 3 goto l0_%=; \
r2 = 2; \
if r0 != r2 goto l0_%=; \
goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <non_const> != <const>, 2")
__success __retval(0)
__naked void deducing_bounds_from_non_const_4(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 > 3 goto l0_%=; \
r2 = 4; \
if r0 != r2 goto l0_%=; \
goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <non_const> == <const>, 1")
__success __retval(0)
__naked void deducing_bounds_from_non_const_5(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 < 4 goto l0_%=; \
w2 = 3; \
if w0 == w2 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <non_const> == <const>, 2")
__success __retval(0)
__naked void deducing_bounds_from_non_const_6(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 > 4 goto l0_%=; \
w2 = 5; \
if w0 == w2 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <non_const> != <const>, 1")
__success __retval(0)
__naked void deducing_bounds_from_non_const_7(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 < 3 goto l0_%=; \
w2 = 2; \
if w0 != w2 goto l0_%=; \
goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <non_const> != <const>, 2")
__success __retval(0)
__naked void deducing_bounds_from_non_const_8(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 > 3 goto l0_%=; \
w2 = 4; \
if w0 != w2 goto l0_%=; \
goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> > <non_const>, 1")
__success __retval(0)
__naked void deducing_bounds_from_non_const_9(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
r2 = 0; \
if r2 > r0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> > <non_const>, 2")
__success __retval(0)
__naked void deducing_bounds_from_non_const_10(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 < 4 goto l0_%=; \
r2 = 4; \
if r2 > r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> >= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_11(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 < 4 goto l0_%=; \
r2 = 3; \
if r2 >= r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> < <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_12(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 > 4 goto l0_%=; \
r2 = 4; \
if r2 < r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> <= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_13(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 >= 4 goto l0_%=; \
r2 = 4; \
if r2 <= r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> == <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_14(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 < 3 goto l0_%=; \
r2 = 2; \
if r2 == r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> s> <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_15(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 s< 4 goto l0_%=; \
r2 = 4; \
if r2 s> r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> s>= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_16(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 s< 4 goto l0_%=; \
r2 = 3; \
if r2 s>= r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> s< <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_17(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 s> 4 goto l0_%=; \
r2 = 4; \
if r2 s< r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> s<= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_18(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 s> 4 goto l0_%=; \
r2 = 5; \
if r2 s<= r0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp64, <const> != <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_19(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if r0 < 3 goto l0_%=; \
r2 = 2; \
if r2 != r0 goto l0_%=; \
goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> > <non_const>, 1")
__success __retval(0)
__naked void deducing_bounds_from_non_const_20(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
w2 = 0; \
if w2 > w0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> > <non_const>, 2")
__success __retval(0)
__naked void deducing_bounds_from_non_const_21(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 < 4 goto l0_%=; \
w2 = 4; \
if w2 > w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> >= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_22(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 < 4 goto l0_%=; \
w2 = 3; \
if w2 >= w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> < <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_23(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 > 4 goto l0_%=; \
w2 = 4; \
if w2 < w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> <= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_24(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 >= 4 goto l0_%=; \
w2 = 4; \
if w2 <= w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> == <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_25(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 < 4 goto l0_%=; \
w2 = 3; \
if w2 == w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> s> <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_26(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 s< 4 goto l0_%=; \
w2 = 4; \
if w2 s> w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> s>= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_27(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 s< 4 goto l0_%=; \
w2 = 3; \
if w2 s>= w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> s< <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_28(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 s> 4 goto l0_%=; \
w2 = 5; \
if w2 s< w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> s<= <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_29(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 s>= 4 goto l0_%=; \
w2 = 4; \
if w2 s<= w0 goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
SEC("socket")
__description("check deducing bounds from non-const, jmp32, <const> != <non_const>")
__success __retval(0)
__naked void deducing_bounds_from_non_const_30(void)
{
asm volatile (" \
call %[bpf_ktime_get_ns]; \
if w0 < 3 goto l0_%=; \
w2 = 2; \
if w2 != w0 goto l0_%=; \
goto l1_%=; \
l0_%=: \
r0 = 0; \
exit; \
l1_%=: \
r0 -= r1; \
exit; \
" :
: __imm(bpf_ktime_get_ns)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";

tools/testing/selftests/bpf/progs/verifier_bounds_mix_sign_unsign.c

@@ -354,7 +354,7 @@ __naked void signed_and_unsigned_variant_10(void)
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r1 = *(u64*)(r10 - 16); \
r2 = 0; \
r2 = -1; \
if r2 > r1 goto l1_%=; \
r0 = 0; \
exit; \