mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-31 16:38:12 +00:00
3123d8018d
Expand dummy prog generation such that we can easily check on return
codes, and add a few more test cases to make sure we keep on tracking
pruning behavior.

  # ./test_verifier
  [...]
  #1066/p XDP pkt read, pkt_data <= pkt_meta', bad access 1 OK
  #1067/p XDP pkt read, pkt_data <= pkt_meta', bad access 2 OK
  Summary: 1580 PASSED, 0 SKIPPED, 0 FAILED

Also verified that the JIT dump of the added test cases looks good.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/df7200b6021444fd369376d227de917357285b65.1576789878.git.daniel@iogearbox.net
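The .retval expectations below only make sense against the dummy programs that the test_verifier harness installs into the BPF_MAP_TYPE_PROG_ARRAY wired up via .fixup_prog1/.fixup_prog2. Judging from the values in this file, key 0 resolves to a program returning 42, key 1 to one returning 41, key 2 to one returning 24, and key 3 stays empty, so a failed tail call falls through to the caller's own return value (1, or 2 in the out-of-bounds cases). What follows is a minimal sketch of such a setup using raw bpf(2) syscalls; it is not the harness's actual implementation, the helper names and the BPF_PROG_TYPE_SOCKET_FILTER choice are illustrative assumptions, and error handling is omitted for brevity.

#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int sys_bpf(enum bpf_cmd cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

/* Load a two-instruction program that just does "r0 = ret; exit". */
static int load_dummy_prog(int ret)
{
	struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = ret },	/* r0 = ret */
		{ .code = BPF_JMP | BPF_EXIT },		/* exit */
	};
	union bpf_attr attr = {};

	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;	/* illustrative */
	attr.insns = (uint64_t)(unsigned long)insns;
	attr.insn_cnt = 2;
	attr.license = (uint64_t)(unsigned long)"GPL";
	return sys_bpf(BPF_PROG_LOAD, &attr);
}

/* Install prog_fd at slot key of the prog array map_fd. */
static int install_prog(int map_fd, uint32_t key, uint32_t prog_fd)
{
	union bpf_attr attr = {};

	attr.map_fd = map_fd;
	attr.key = (uint64_t)(unsigned long)&key;
	attr.value = (uint64_t)(unsigned long)&prog_fd;
	attr.flags = BPF_ANY;
	return sys_bpf(BPF_MAP_UPDATE_ELEM, &attr);
}

/*
 * Four-slot prog array matching what the retvals in this file imply:
 * key 0 -> return 42, key 2 -> return 24, key 3 left empty so the
 * caller's own fall-through value is observed. The self-tail-calling
 * program the harness is expected to place at key 1 (the "prog loop"
 * case, retval 41) is omitted here for brevity.
 */
static int create_prog_array(void)
{
	union bpf_attr attr = {};
	int map_fd;

	attr.map_type = BPF_MAP_TYPE_PROG_ARRAY;
	attr.key_size = sizeof(uint32_t);
	attr.value_size = sizeof(uint32_t);	/* values are prog fds */
	attr.max_entries = 4;
	map_fd = sys_bpf(BPF_MAP_CREATE, &attr);

	install_prog(map_fd, 0, load_dummy_prog(42));
	install_prog(map_fd, 2, load_dummy_prog(24));
	return map_fd;
}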
231 lines
6.1 KiB
C
{
	"runtime/jit: tail_call within bounds, prog once",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 42,
},
{
	"runtime/jit: tail_call within bounds, prog loop",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, 1),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 41,
},
{
	"runtime/jit: tail_call within bounds, no prog",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, 3),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 1,
},
{
	"runtime/jit: tail_call within bounds, key 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 24,
},
{
	"runtime/jit: tail_call within bounds, key 2 / key 2, first branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 13),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 5, 9 },
	.result = ACCEPT,
	.retval = 24,
},
{
	"runtime/jit: tail_call within bounds, key 2 / key 2, second branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 14),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 5, 9 },
	.result = ACCEPT,
	.retval = 24,
},
{
	"runtime/jit: tail_call within bounds, key 0 / key 2, first branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 13),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 5, 9 },
	.result = ACCEPT,
	.retval = 24,
},
{
	"runtime/jit: tail_call within bounds, key 0 / key 2, second branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 14),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 2),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 5, 9 },
	.result = ACCEPT,
	.retval = 42,
},
{
	"runtime/jit: tail_call within bounds, different maps, first branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 13),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 5 },
	.fixup_prog2 = { 9 },
	.result_unpriv = REJECT,
	.errstr_unpriv = "tail_call abusing map_ptr",
	.result = ACCEPT,
	.retval = 1,
},
{
	"runtime/jit: tail_call within bounds, different maps, second branch",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 14),
	BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 13, 4),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_JMP_IMM(BPF_JA, 0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 5 },
	.fixup_prog2 = { 9 },
	.result_unpriv = REJECT,
	.errstr_unpriv = "tail_call abusing map_ptr",
	.result = ACCEPT,
	.retval = 42,
},
{
	"runtime/jit: tail_call out of bounds",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, 256),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 2,
},
{
	"runtime/jit: pass negative index to tail_call",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_3, -1),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 1 },
	.result = ACCEPT,
	.retval = 2,
},
{
	"runtime/jit: pass > 32bit index to tail_call",
	.insns = {
	BPF_LD_IMM64(BPF_REG_3, 0x100000000ULL),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_tail_call),
	BPF_MOV64_IMM(BPF_REG_0, 2),
	BPF_EXIT_INSN(),
	},
	.fixup_prog1 = { 2 },
	.result = ACCEPT,
	.retval = 42,
	/* Verifier rewrite for unpriv skips tail call here. */
	.retval_unpriv = 2,
},