mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-10-31 16:38:12 +00:00
711aef1bbf
The current method of comparing 64-bit numbers for a conditional jump is: 1) compare the high 32 bits first; 2) if the high 32 bits differ, go to step 4; 3) compare the low 32 bits; 4) check the desired condition. This method is correct for unsigned comparisons, but it is buggy for signed comparisons, because it performs a signed comparison on the low 32 bits as well. A 64-bit number has only one sign bit — the MSB of the 64-bit value — so it is wrong to treat the low 32 bits as a signed number and compare them with a signed comparison. This patch fixes the bug and adds a test case for it in selftests/bpf. Signed-off-by: Wang YanQing <udknight@gmail.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
107 lines
2.8 KiB
C
{
	/* Exercise JIT code generation for shifts by immediate 1, in
	 * both 64-bit (BPF_ALU64) and 32-bit (BPF_ALU32) forms of
	 * LSH/RSH, plus 64-bit ARSH.  The program returns 2 only if
	 * every intermediate shift result matches; any mismatch falls
	 * through to an early exit with r0 = 1.
	 */
	"jit: lsh, rsh, arsh by 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),		/* r0 = 1 (failure retval) */
	BPF_MOV64_IMM(BPF_REG_1, 0xff),
	BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),	/* r1 = 0x1fe */
	BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),	/* r1 = 0x3fc */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
	BPF_EXIT_INSN(),			/* mismatch: return 1 */
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),	/* r1 = 0x1fe */
	BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),	/* r1 = 0xff */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
	BPF_EXIT_INSN(),			/* mismatch: return 1 */
	BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),	/* r1 = 0x7f */
	BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
	BPF_EXIT_INSN(),			/* mismatch: return 1 */
	BPF_MOV64_IMM(BPF_REG_0, 2),		/* all checks passed */
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 2,
},
|
|
{
	/* Check that a BPF_LD_IMM64 whose value has the upper 32 bits
	 * set survives a 64-bit right shift: r1 = 0xfeffffffffffffff,
	 * r1 >>= 32 must yield 0xfeffffff (matching r2).  Returns 2 on
	 * match, 1 otherwise.
	 */
	"jit: mov32 for ldimm64, 1",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 2),		/* r0 = 2 (success retval) */
	BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
	BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),	/* r1 = 0xfeffffff */
	BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
	BPF_MOV64_IMM(BPF_REG_0, 1),		/* mismatch: return 1 */
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 2,
},
|
|
{
	/* Check that BPF_LD_IMM64 loads full 64-bit values: 0x1ffffffff
	 * and 0xffffffff share the same low 32 bits, so they must
	 * compare unequal under a 64-bit JEQ.  Returns 2 when they are
	 * (correctly) unequal, 1 if the JIT truncated the upper bits.
	 */
	"jit: mov32 for ldimm64, 2",
	.insns = {
	BPF_MOV64_IMM(BPF_REG_0, 1),		/* r0 = 1 (failure retval) */
	BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
	BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
	BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
	BPF_MOV64_IMM(BPF_REG_0, 2),		/* unequal as expected */
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 2,
},
|
|
{
	/* Exercise JIT multiply code generation:
	 *   1) 64-bit MUL, dst = r0            (0xfefefe * 0xefefef)
	 *   2) 64-bit MUL, dst = r3            (same operands, non-r0 dst)
	 *   3) 32-bit MUL, dst = r0            (result truncated to 32 bits)
	 *   4) 32-bit MUL, dst = r3
	 *   5) 32-bit MUL with a dst whose upper 32 bits are set
	 * Each product is compared against a precomputed constant; any
	 * mismatch returns 1, full success returns 2.
	 */
	"jit: various mul tests",
	.insns = {
	BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),	/* expected 64-bit product */
	BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
	BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
	BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),	/* 64-bit, dst = r0 */
	BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
	BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),	/* 64-bit, dst = r3 */
	BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),		/* r2 = low 32 bits of product */
	BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
	BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),	/* 32-bit, dst = r0 */
	BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
	BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),	/* 32-bit, dst = r3 */
	BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),		/* expected 32-bit product */
	BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
	BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),	/* dst with upper 32 bits set */
	BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),	/* 32-bit mul must ignore them */
	BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),
	BPF_MOV64_IMM(BPF_REG_0, 2),			/* all checks passed */
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 2,
},
|
|
{
	/* Regression test for signed 64-bit conditional jumps in the
	 * JIT (see commit message above): 0x80000000 has bit 31 set
	 * but is a positive 64-bit value, so it must compare signed-
	 * greater-than 0.  A JIT that compares the halves with signed
	 * 32-bit compares gets this wrong.  Returns 2 when both JSGT
	 * and JSLT behave correctly, 1 otherwise.
	 */
	"jit: jsgt, jslt",
	.insns = {
	BPF_LD_IMM64(BPF_REG_1, 0x80000000ULL),	/* positive as 64-bit */
	BPF_LD_IMM64(BPF_REG_2, 0x0ULL),
	BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_2, 2),	/* must be taken */
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),

	BPF_JMP_REG(BPF_JSLT, BPF_REG_2, BPF_REG_1, 2),	/* must be taken */
	BPF_MOV64_IMM(BPF_REG_0, 1),
	BPF_EXIT_INSN(),

	BPF_MOV64_IMM(BPF_REG_0, 2),		/* both comparisons correct */
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.retval = 2,
},
|