Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-10-31 16:38:12 +00:00)
Commit bdc4e36945

Add a bpf_iter test which feeds bpf_get_task_stack's return value into
seq_write after confirming it is positive. No attempt is made to bound the
value from above, so the load will fail if the verifier does not refine the
retval range based on the buffer-size input to bpf_get_task_stack.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Link: https://lore.kernel.org/bpf/20210416204704.2816874-3-davemarchevsky@fb.com
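The instruction sequence in the second test below corresponds roughly to the following bpf_iter program, shown here as a minimal C sketch for orientation only. It assumes vmlinux.h and libbpf's helper definitions; the names stack_buf, stackmap, and dump_task_stack are illustrative, not taken from the commit.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

struct stack_buf {
	char data[48];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct stack_buf);
} stackmap SEC(".maps");

SEC("iter/task")
int dump_task_stack(struct bpf_iter__task *ctx)
{
	struct seq_file *seq = ctx->meta->seq;
	struct task_struct *task = ctx->task;
	__u32 zero = 0;
	long retval;
	void *buf;

	buf = bpf_map_lookup_elem(&stackmap, &zero);
	if (!buf)
		return 0;
	if (!task)
		return 0;

	retval = bpf_get_task_stack(task, buf, 48, 0);
	if (retval <= 0)
		return 0;

	/* retval is checked only to be positive; the verifier must
	 * refine its upper bound to 48 for this length to be valid.
	 */
	bpf_seq_write(seq, buf, retval);
	return 0;
}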
{
	"bpf_get_stack return R0 within range",
	.insns = {
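	/* Save the ctx in r6 and look up a map value (struct test_val,
	 * 48 bytes) to use as the stack buffer; exit if the lookup fails.
	 */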
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
	BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
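	/* First bpf_get_stack() call: dump a user stack
	 * (flags = 256 = BPF_F_USER_STACK) into the first
	 * sizeof(struct test_val)/2 bytes of the buffer.
	 */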
	BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
	BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2),
	BPF_MOV64_IMM(BPF_REG_4, 256),
	BPF_EMIT_CALL(BPF_FUNC_get_stack),
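	/* Sign-extend the 32-bit return value into r8 and skip the
	 * second call if it is negative.
	 */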
	BPF_MOV64_IMM(BPF_REG_1, 0),
	BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
	BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_8, 16),
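	/* Advance the buffer pointer by the bytes already written and cap
	 * the second dump at the remaining space; this arithmetic stays in
	 * bounds only because the verifier tracks that the first call's
	 * retval cannot exceed the buffer size passed to it.
	 */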
	BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2),
	BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_EMIT_CALL(BPF_FUNC_get_stack),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_48b = { 4 },
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
	"bpf_get_task_stack return R0 range is refined",
	.insns = {
	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
	BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_6, 0), // ctx->meta->seq
	BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, 8), // ctx->task
	BPF_LD_MAP_FD(BPF_REG_1, 0), // fixup_map_array_48b
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

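	/* Call bpf_get_task_stack(task, buf, 48, 0); the verifier is
	 * expected to refine r0's upper bound to the 48-byte buffer size.
	 */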
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
	BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), // keep buf for seq_write
	BPF_MOV64_IMM(BPF_REG_3, 48),
	BPF_MOV64_IMM(BPF_REG_4, 0),
	BPF_EMIT_CALL(BPF_FUNC_get_task_stack),
	BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),

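	/* Feed the positive retval straight to bpf_seq_write() as the
	 * length; per the commit message, the load fails unless the
	 * verifier refined r0's range above.
	 */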
	BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
	BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
	BPF_EMIT_CALL(BPF_FUNC_seq_write),

	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACING,
	.expected_attach_type = BPF_TRACE_ITER,
	.kfunc = "task",
	.runs = -1, // Don't run, just load
	.fixup_map_array_48b = { 3 },
},