bpf/tests: add tests for cpuv4 instructions

The BPF JITs now support cpuv4 instructions. Add tests for these new
instructions to the test suite (a brief encoding sketch follows the list):

1. Sign-extended load
2. Sign-extended mov
3. Unconditional byte swap
4. Unconditional jump with 32-bit offset
5. Signed division and modulo
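
For reference, the encodings these tests exercise look roughly as follows
(illustrative pseudo-usage only, numbered to match the list above; the
macros are the ones this patch adds):

    BPF_LDX_MEMSX(BPF_B, R0, R10, -8);    /* 1: R0 = (s64)*(s8 *)(R10 - 8) */
    BPF_MOVSX64_REG(R1, R3, 16);          /* 2: R1 = (s64)(s16)R3; off selects the width */
    BPF_BSWAP(R0, 32);                    /* 3: unconditional 32-bit byte swap */
    BPF_JMP32_IMM(BPF_JA, 0, 1, 0);       /* 4: jump offset carried in the 32-bit imm */
    BPF_ALU64_IMM_OFF(BPF_DIV, R0, 2, 1); /* 5: off = 1 makes BPF_DIV/BPF_MOD signed */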

Signed-off-by: Puranjay Mohan <puranjay12@gmail.com>
Link: https://lore.kernel.org/r/20230907230550.1417590-9-puranjay12@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

include/linux/filter.h

@@ -117,21 +117,25 @@ struct ctl_table_header;
/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
-#define BPF_ALU64_IMM(OP, DST, IMM) \
+#define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
-.off = 0, \
+.off = OFF, \
.imm = IMM })
+#define BPF_ALU64_IMM(OP, DST, IMM) \
+BPF_ALU64_IMM_OFF(OP, DST, IMM, 0)
-#define BPF_ALU32_IMM(OP, DST, IMM) \
+#define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
-.off = 0, \
+.off = OFF, \
.imm = IMM })
+#define BPF_ALU32_IMM(OP, DST, IMM) \
+BPF_ALU32_IMM_OFF(OP, DST, IMM, 0)
/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
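
The only change to the existing macros is the explicit off parameter; the
old names are kept as off = 0 wrappers, so existing users are unaffected.
A minimal usage sketch (illustrative only):

    BPF_ALU32_IMM(BPF_DIV, R0, 2);        /* expands to BPF_ALU32_IMM_OFF(BPF_DIV, R0, 2, 0): unsigned */
    BPF_ALU32_IMM_OFF(BPF_DIV, R0, 2, 1); /* off = 1: signed division (SDIV) */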
@@ -143,6 +147,16 @@ struct ctl_table_header;
.off = 0, \
.imm = LEN })
/* Byte Swap, bswap16/32/64 */
#define BPF_BSWAP(DST, LEN) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE), \
.dst_reg = DST, \
.src_reg = 0, \
.off = 0, \
.imm = LEN })
/* Short form of mov, dst_reg = src_reg */
#define BPF_MOV64_REG(DST, SRC) \
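
In contrast to the BPF_ENDIAN() conversions just above, which only swap
when source and target byte order differ, the cpuv4 byte swap is
unconditional. Sketch (illustrative only):

    BPF_LD_IMM64(R0, 0x0123456789abcdefLL);
    BPF_BSWAP(R0, 16);  /* R0 = 0xefcd on both little- and big-endian hosts */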
@@ -179,6 +193,24 @@ struct ctl_table_header;
.off = 0, \
.imm = IMM })
/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */
#define BPF_MOVSX64_REG(DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_MOV | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
.imm = 0 })
#define BPF_MOVSX32_REG(DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_MOV | BPF_X, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
.imm = 0 })
/* Special form of mov32, used for doing explicit zero extension on dst. */
#define BPF_ZEXT_REG(DST) \
((struct bpf_insn) { \
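
For movsx, off selects how many low-order bits of the source are treated
as a signed value: 8, 16, or 32. Sketch (illustrative only):

    BPF_MOVSX32_REG(R1, R3, 8);   /* R1 = (u32)(s32)(s8)R3; upper 32 bits zeroed */
    BPF_MOVSX64_REG(R1, R3, 32);  /* R1 = (s64)(s32)R3 */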
@@ -263,6 +295,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
/* Memory load, dst_reg = *(signed size *) (src_reg + off16) */
#define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF) \
((struct bpf_insn) { \
.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX, \
.dst_reg = DST, \
.src_reg = SRC, \
.off = OFF, \
.imm = 0 })
/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
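
BPF_LDX_MEMSX behaves like BPF_LDX_MEM except that the loaded B/H/W value
is sign- rather than zero-extended to 64 bits, i.e. roughly:

    BPF_LDX_MEMSX(BPF_W, R0, R1, 0);  /* R0 = (s64)*(s32 *)(R1 + 0) */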

lib/test_bpf.c

@@ -5111,6 +5111,104 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0xffffffff } }
},
/* MOVSX32 */
{
"ALU_MOVSX | BPF_B",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x00000000ffffffefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX32_REG(R1, R3, 8),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU_MOVSX | BPF_H",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x00000000ffffbeefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX32_REG(R1, R3, 16),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU_MOVSX | BPF_W",
.u.insns_int = {
BPF_LD_IMM64(R2, 0x00000000deadbeefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX32_REG(R1, R3, 32),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
/* MOVSX64 REG */
{
"ALU64_MOVSX | BPF_B",
.u.insns_int = {
BPF_LD_IMM64(R2, 0xffffffffffffffefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX64_REG(R1, R3, 8),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_MOVSX | BPF_H",
.u.insns_int = {
BPF_LD_IMM64(R2, 0xffffffffffffbeefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX64_REG(R1, R3, 16),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
{
"ALU64_MOVSX | BPF_W",
.u.insns_int = {
BPF_LD_IMM64(R2, 0xffffffffdeadbeefLL),
BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
BPF_MOVSX64_REG(R1, R3, 32),
BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
BPF_MOV32_IMM(R0, 2),
BPF_EXIT_INSN(),
BPF_MOV32_IMM(R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1 } },
},
/* BPF_ALU | BPF_ADD | BPF_X */
{
"ALU_ADD_X: 1 + 2 = 3",
@@ -6105,6 +6203,106 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 2 } },
},
/* BPF_ALU | BPF_DIV | BPF_X off=1 (SDIV) */
{
"ALU_SDIV_X: -6 / 2 = -3",
.u.insns_int = {
BPF_LD_IMM64(R0, -6),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU32_REG_OFF(BPF_DIV, R0, R1, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -3 } },
},
/* BPF_ALU | BPF_DIV | BPF_K off=1 (SDIV) */
{
"ALU_SDIV_K: -6 / 2 = -3",
.u.insns_int = {
BPF_LD_IMM64(R0, -6),
BPF_ALU32_IMM_OFF(BPF_DIV, R0, 2, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -3 } },
},
/* BPF_ALU64 | BPF_DIV | BPF_X off=1 (SDIV64) */
{
"ALU64_SDIV_X: -6 / 2 = -3",
.u.insns_int = {
BPF_LD_IMM64(R0, -6),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU64_REG_OFF(BPF_DIV, R0, R1, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -3 } },
},
/* BPF_ALU64 | BPF_DIV | BPF_K off=1 (SDIV64) */
{
"ALU64_SDIV_K: -6 / 2 = -3",
.u.insns_int = {
BPF_LD_IMM64(R0, -6),
BPF_ALU64_IMM_OFF(BPF_DIV, R0, 2, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -3 } },
},
/* BPF_ALU | BPF_MOD | BPF_X off=1 (SMOD) */
{
"ALU_SMOD_X: -7 % 2 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, -7),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU32_REG_OFF(BPF_MOD, R0, R1, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } },
},
/* BPF_ALU | BPF_MOD | BPF_K off=1 (SMOD) */
{
"ALU_SMOD_K: -7 % 2 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, -7),
BPF_ALU32_IMM_OFF(BPF_MOD, R0, 2, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } },
},
/* BPF_ALU64 | BPF_MOD | BPF_X off=1 (SMOD64) */
{
"ALU64_SMOD_X: -7 % 2 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, -7),
BPF_ALU32_IMM(BPF_MOV, R1, 2),
BPF_ALU64_REG_OFF(BPF_MOD, R0, R1, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } },
},
/* BPF_ALU64 | BPF_MOD | BPF_K off=1 (SMOD64) */
{
"ALU64_SMOD_X: -7 % 2 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, -7),
BPF_ALU64_IMM_OFF(BPF_MOD, R0, 2, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, -1 } },
},
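/* Note: the expected values above assume that BPF signed division
 * truncates towards zero and that the remainder takes the sign of the
 * dividend, i.e. -7 / 2 = -3 and -7 % 2 = -1 (floored division would
 * instead give -4 and 1).
 */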
/* BPF_ALU | BPF_AND | BPF_X */
{
"ALU_AND_X: 3 & 2 = 2",
@@ -7837,6 +8035,104 @@ static struct bpf_test tests[] = {
{ },
{ { 0, (u32) (cpu_to_le64(0xfedcba9876543210ULL) >> 32) } },
},
/* BSWAP */
{
"BSWAP 16: 0x0123456789abcdef -> 0xefcd",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_BSWAP(R0, 16),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xefcd } },
},
{
"BSWAP 32: 0x0123456789abcdef -> 0xefcdab89",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_BSWAP(R0, 32),
BPF_ALU64_REG(BPF_MOV, R1, R0),
BPF_ALU64_IMM(BPF_RSH, R1, 32),
BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xefcdab89 } },
},
{
"BSWAP 64: 0x0123456789abcdef -> 0x67452301",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_BSWAP(R0, 64),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x67452301 } },
},
{
"BSWAP 64: 0x0123456789abcdef >> 32 -> 0xefcdab89",
.u.insns_int = {
BPF_LD_IMM64(R0, 0x0123456789abcdefLL),
BPF_BSWAP(R0, 64),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0xefcdab89 } },
},
/* BSWAP, reversed */
{
"BSWAP 16: 0xfedcba9876543210 -> 0x1032",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_BSWAP(R0, 16),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x1032 } },
},
{
"BSWAP 32: 0xfedcba9876543210 -> 0x10325476",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_BSWAP(R0, 32),
BPF_ALU64_REG(BPF_MOV, R1, R0),
BPF_ALU64_IMM(BPF_RSH, R1, 32),
BPF_ALU32_REG(BPF_ADD, R0, R1), /* R1 = 0 */
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x10325476 } },
},
{
"BSWAP 64: 0xfedcba9876543210 -> 0x98badcfe",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_BSWAP(R0, 64),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x98badcfe } },
},
{
"BSWAP 64: 0xfedcba9876543210 >> 32 -> 0x10325476",
.u.insns_int = {
BPF_LD_IMM64(R0, 0xfedcba9876543210ULL),
BPF_BSWAP(R0, 64),
BPF_ALU64_IMM(BPF_RSH, R0, 32),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0x10325476 } },
},
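/* Note: BPF_BSWAP is expected to swap unconditionally, so unlike the
 * BPF_ENDIAN() tests above these results do not depend on host byte
 * order.
 */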
/* BPF_LDX_MEM B/H/W/DW */
{
"BPF_LDX_MEM | BPF_B, base",
@@ -8228,6 +8524,67 @@ static struct bpf_test tests[] = {
{ { 32, 0 } },
.stack_depth = 0,
},
/* BPF_LDX_MEMSX B/H/W */
{
"BPF_LDX_MEMSX | BPF_B",
.u.insns_int = {
BPF_LD_IMM64(R1, 0xdead0000000000f0ULL),
BPF_LD_IMM64(R2, 0xfffffffffffffff0ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_LDX_MEMSX(BPF_B, R0, R10, -1),
#else
BPF_LDX_MEMSX(BPF_B, R0, R10, -8),
#endif
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_LDX_MEMSX | BPF_H",
.u.insns_int = {
BPF_LD_IMM64(R1, 0xdead00000000f123ULL),
BPF_LD_IMM64(R2, 0xfffffffffffff123ULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_LDX_MEMSX(BPF_H, R0, R10, -2),
#else
BPF_LDX_MEMSX(BPF_H, R0, R10, -8),
#endif
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
{
"BPF_LDX_MEMSX | BPF_W",
.u.insns_int = {
BPF_LD_IMM64(R1, 0x00000000deadbeefULL),
BPF_LD_IMM64(R2, 0xffffffffdeadbeefULL),
BPF_STX_MEM(BPF_DW, R10, R1, -8),
#ifdef __BIG_ENDIAN
BPF_LDX_MEMSX(BPF_W, R0, R10, -4),
#else
BPF_LDX_MEMSX(BPF_W, R0, R10, -8),
#endif
BPF_JMP_REG(BPF_JNE, R0, R2, 1),
BPF_ALU64_IMM(BPF_MOV, R0, 0),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 0 } },
.stack_depth = 8,
},
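/* Note: the __BIG_ENDIAN offsets above address the low-order bytes of
 * the stored double word, which sit at the highest stack addresses on
 * big-endian hosts and at the lowest (off -8) on little-endian hosts.
 */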
/* BPF_STX_MEM B/H/W/DW */
{
"BPF_STX_MEM | BPF_B",
@@ -9474,6 +9831,20 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 1 } },
},
/* BPF_JMP32 | BPF_JA */
{
"JMP32_JA: Unconditional jump: if (true) return 1",
.u.insns_int = {
BPF_ALU32_IMM(BPF_MOV, R0, 0),
BPF_JMP32_IMM(BPF_JA, 0, 1, 0),
BPF_EXIT_INSN(),
BPF_ALU32_IMM(BPF_MOV, R0, 1),
BPF_EXIT_INSN(),
},
INTERNAL,
{ },
{ { 0, 1 } },
},
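/* Note: BPF_JMP32 | BPF_JA takes its branch offset from the 32-bit imm
 * field (the third argument above), not from the 16-bit off field used
 * by BPF_JMP | BPF_JA.
 */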
/* BPF_JMP | BPF_JSLT | BPF_K */
{
"JMP_JSLT_K: Signed jump: if (-2 < -1) return 1",