selftests/bpf: Some more atomic tests

Add some new verifier tests that cover important gaps in the parameter
space for atomic ops.

There are already exhaustive tests for the JIT part in lib/test_bpf.c,
but unlike those, these also exercise the verifier.

Signed-off-by: Brendan Jackman <jackmanb@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20211015093318.1273686-1-jackmanb@google.com

@@ -119,3 +119,41 @@
},
.result = ACCEPT,
},
{
"Dest pointer in r0 - fail",
.insns = {
/* val = 0; */
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
/* r0 = &val */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
/* r0 = atomic_cmpxchg(&val, r0, 1); */
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
/* if (r0 != 0) exit(1); */
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"Dest pointer in r0 - succeed",
.insns = {
/* r0 = &val */
BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
/* val = r0; */
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
/* r0 = atomic_cmpxchg(&val, r0, 0); */
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, BPF_REG_10, BPF_REG_1, -8),
/* r1 = *r0 */
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
/* exit(0); */
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
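Both of these cases lean on the fact that BPF_CMPXCHG uses R0 implicitly.
A rough C model of BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG, dst_reg, src_reg, off)
is sketched below (an illustration of the semantics under test only, not part
of the patch):

        /* addr stands for dst_reg + off; dst_reg must hold a valid pointer. */
        static unsigned long long bpf_cmpxchg_model(unsigned long long *addr,
                                                    unsigned long long r0,
                                                    unsigned long long src)
        {
                unsigned long long old = *addr; /* load the current value */

                if (old == r0)                  /* R0 is the implicit compare value */
                        *addr = src;            /* store src on match */
                return old;                     /* the caller's R0 always becomes this */
        }

Since R0 is both an input and where the old value comes back, whatever was
parked in R0 (here a stack pointer) is replaced by the cmpxchg, and the
"fail"/"succeed" pair above checks how the verifier tracks R0 across that.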

@@ -0,0 +1,57 @@
#define __ATOMIC_FETCH_OP_TEST(src_reg, dst_reg, operand1, op, operand2, expect) \
{ \
"atomic fetch " #op ", src=" #dst_reg " dst=" #dst_reg, \
.insns = { \
/* u64 val = operand1; */ \
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, operand1), \
/* u64 old = atomic_fetch_<op>(&val, operand2); */ \
BPF_MOV64_REG(dst_reg, BPF_REG_10), \
BPF_MOV64_IMM(src_reg, operand2), \
BPF_ATOMIC_OP(BPF_DW, op, \
dst_reg, src_reg, -8), \
/* if (old != operand1) exit(1); */ \
BPF_JMP_IMM(BPF_JEQ, src_reg, operand1, 2), \
BPF_MOV64_IMM(BPF_REG_0, 1), \
BPF_EXIT_INSN(), \
/* if (val != expect) exit(2); */ \
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_10, -8), \
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, expect, 2), \
BPF_MOV64_IMM(BPF_REG_0, 2), \
BPF_EXIT_INSN(), \
/* exit(0); */ \
BPF_MOV64_IMM(BPF_REG_0, 0), \
BPF_EXIT_INSN(), \
}, \
.result = ACCEPT, \
}
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 1, BPF_ADD | BPF_FETCH, 2, 3),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_AND | BPF_FETCH, 0x011, 0x010),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_OR | BPF_FETCH, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_XOR | BPF_FETCH, 0x011, 0x001),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_2, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_0, BPF_REG_1, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_1, BPF_REG_0, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_2, BPF_REG_3, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_4, BPF_REG_5, 0x010, BPF_XCHG, 0x011, 0x011),
__ATOMIC_FETCH_OP_TEST(BPF_REG_9, BPF_REG_8, 0x010, BPF_XCHG, 0x011, 0x011),
#undef __ATOMIC_FETCH_OP_TEST
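Each __ATOMIC_FETCH_OP_TEST() expansion follows the same shape. In rough C
terms, with the compiler builtin standing in for the BPF instruction (a
sketch under that assumption, not part of the patch), the BPF_ADD | BPF_FETCH
case with operands (1, 2, 3) amounts to:

        static int check_fetch_add(void)
        {
                unsigned long long val = 1;     /* operand1 */
                /* operand2 = 2; the fetch variant returns the old value */
                unsigned long long old = __atomic_fetch_add(&val, 2, __ATOMIC_SEQ_CST);

                if (old != 1)                   /* old must equal operand1 */
                        return 1;
                if (val != 3)                   /* expect = 1 + 2 for BPF_ADD */
                        return 2;
                return 0;
        }

The six src/dst register pairings per operation are the point of the list
above: they vary which registers act as src and dst, including R0, which is
the parameter-space coverage the commit message refers to.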

@@ -0,0 +1,25 @@
#define __INVALID_ATOMIC_ACCESS_TEST(op) \
{ \
"atomic " #op " access through non-pointer ", \
.insns = { \
BPF_MOV64_IMM(BPF_REG_0, 1), \
BPF_MOV64_IMM(BPF_REG_1, 0), \
BPF_ATOMIC_OP(BPF_DW, op, BPF_REG_1, BPF_REG_0, -8), \
BPF_MOV64_IMM(BPF_REG_0, 0), \
BPF_EXIT_INSN(), \
}, \
.result = REJECT, \
.errstr = "R1 invalid mem access 'inv'" \
}
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD),
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD),
__INVALID_ATOMIC_ACCESS_TEST(BPF_ADD | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_AND),
__INVALID_ATOMIC_ACCESS_TEST(BPF_AND | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_OR),
__INVALID_ATOMIC_ACCESS_TEST(BPF_OR | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR),
__INVALID_ATOMIC_ACCESS_TEST(BPF_XOR | BPF_FETCH),
__INVALID_ATOMIC_ACCESS_TEST(BPF_XCHG),
__INVALID_ATOMIC_ACCESS_TEST(BPF_CMPXCHG),
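Like the rest of the verifier/ snippets, these fragments are not standalone
programs; they get pulled into the tests[] array in test_verifier.c, roughly
like this (a sketch of the existing include mechanism, not something this
patch adds):

        static struct bpf_test tests[] = {
        #define FILL_ARRAY
        #include <verifier/tests.h>     /* generated list of verifier/*.c */
        #undef FILL_ARRAY
        };

Each macro invocation above therefore becomes one REJECT case: R1 holds the
scalar 0 rather than a pointer when the atomic op tries to dereference it,
so the verifier log is expected to contain "R1 invalid mem access 'inv'".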