Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-10-31 16:38:12 +00:00)
Commit 91c960b005
A subsequent patch will add additional atomic operations. These new operations will use the same opcode field as the existing XADD, with the immediate discriminating the different operations. In preparation, rename the instruction mode to BPF_ATOMIC and start calling the zero immediate BPF_ADD.

This is possible (it doesn't break existing valid BPF programs) because the immediate field is currently reserved must-be-zero and BPF_ADD is zero. All in-tree uses are removed, but the BPF_XADD definition is kept around to avoid breaking builds for people including kernel headers.

Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Björn Töpel <bjorn.topel@gmail.com>
Link: https://lore.kernel.org/bpf/20210114181751.768687-5-jackmanb@google.com
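To make the encoding change described above concrete, here is a minimal sketch (not part of the file below, and assuming a UAPI <linux/bpf.h> recent enough to define BPF_ATOMIC, i.e. 5.12+) of a doubleword atomic add built by hand; the variable name is purely illustrative.

#include <linux/bpf.h>	/* struct bpf_insn, BPF_STX, BPF_DW, BPF_ATOMIC, BPF_ADD, BPF_REG_* */

/* BPF_ATOMIC reuses the opcode value of the old BPF_XADD mode; the specific
 * atomic operation now lives in insn->imm.  Because BPF_ADD == 0x00 and the
 * immediate used to be reserved must-be-zero, an existing XADD instruction
 * decodes unchanged as an atomic add.
 */
static const struct bpf_insn atomic_add_dw = {
	.code    = BPF_STX | BPF_DW | BPF_ATOMIC,	/* same opcode byte as the old BPF_STX | BPF_DW | BPF_XADD */
	.dst_reg = BPF_REG_1,
	.src_reg = BPF_REG_2,
	.off     = 0,
	.imm     = BPF_ADD,				/* 0x00: *(u64 *)(dst_reg + off) += src_reg, atomically */
};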
67 lines | 1.9 KiB | C
{
	"leak pointer into ctx 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_2,
		      offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 2 },
	.errstr_unpriv = "R2 leaks addr into mem",
	.result_unpriv = REJECT,
	.result = REJECT,
	.errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
},
{
	"leak pointer into ctx 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_1, BPF_REG_10,
		      offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.errstr_unpriv = "R10 leaks addr into mem",
	.result_unpriv = REJECT,
	.result = REJECT,
	.errstr = "BPF_ATOMIC stores into R1 ctx is not allowed",
},
{
	"leak pointer into ctx 3",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_LD_MAP_FD(BPF_REG_2, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_2,
		    offsetof(struct __sk_buff, cb[0])),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 1 },
	.errstr_unpriv = "R2 leaks addr into ctx",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
{
	"leak pointer into map val",
	.insns = {
	BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
	BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
	BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
	BPF_LD_MAP_FD(BPF_REG_1, 0),
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
	BPF_MOV64_IMM(BPF_REG_3, 0),
	BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
	BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_6, 0),
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.fixup_map_hash_8b = { 4 },
	.errstr_unpriv = "R6 leaks addr into mem",
	.result_unpriv = REJECT,
	.result = ACCEPT,
},
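For context, the designated initializers in this file populate entries of the test_verifier selftest's test table; fixup_map_hash_8b lists the instruction indexes (the BPF_LD_MAP_FD pseudo-instructions above) that get patched with a real hash-map fd before the program is loaded. The sketch below is a trimmed, hypothetical approximation of that entry type, inferred only from the fields used here; the real definition lives in tools/testing/selftests/bpf/test_verifier.c and has many more members.

#include <linux/bpf.h>	/* struct bpf_insn */

/* Hypothetical, trimmed sketch of the test-table entry type (the real
 * struct bpf_test in test_verifier.c is considerably larger).
 */
struct bpf_test_sketch {
	const char *descr;		/* "leak pointer into ctx 1", ... */
	struct bpf_insn insns[64];	/* the program fed to the verifier */
	int fixup_map_hash_8b[8];	/* insn indexes whose imm is patched with a map fd */
	const char *errstr;		/* expected verifier error for the privileged run */
	const char *errstr_unpriv;	/* expected verifier error for the unprivileged run */
	enum { ACCEPT, REJECT } result, result_unpriv;
};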