There are currently no tests for ALU64 shift operations when the shift
amount is 0. Add six new tests, one immediate and one register variant
each for LSH, RSH, and ARSH, to verify that a shift by 0 behaves as a
no-op. The x32 JIT had bugs of exactly this kind that these tests
would have caught.
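
For illustration only (this is a minimal sketch, not code taken from
the x32 JIT), here is one way the class of bug can arise: a 32-bit
backend lowers a 64-bit logical right shift into two 32-bit shifts,
assuming a nonzero count. The hypothetical helpers x86_shl()/x86_shr()
model x86's masking of the shift count to 5 bits, so the example is
well-defined C:

  #include <stdint.h>
  #include <stdio.h>

  /* x86 32-bit shifts mask the count to 5 bits, so a count of 32
   * behaves like a count of 0. Model that explicitly. */
  static uint32_t x86_shl(uint32_t v, unsigned int k)
  {
          return v << (k & 31);
  }

  static uint32_t x86_shr(uint32_t v, unsigned int k)
  {
          return v >> (k & 31);
  }

  /* Naive two-register lowering of a 64-bit logical right shift,
   * correct only for 0 < k < 32. */
  static uint64_t rsh64_lowered(uint64_t val, unsigned int k)
  {
          uint32_t hi = val >> 32, lo = (uint32_t)val;

          lo = x86_shr(lo, k) | x86_shl(hi, 32 - k); /* wrong for k == 0 */
          hi = x86_shr(hi, k);
          return ((uint64_t)hi << 32) | lo;
  }

  int main(void)
  {
          /* 0x100000000 >> 0 must stay 0x100000000, but this lowering
           * yields 0x100000001: x86_shl(hi, 32) acts like a shift by 0
           * and leaks the high word into the low word. */
          printf("0x%llx\n",
                 (unsigned long long)rsh64_lowered(1ULL << 32, 0));
          return 0;
  }

The new tests use the same value, 0x100000000, so a JIT with this kind
of lowering returns 2 instead of 1 and fails the test.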

Cc: Xi Wang <xi.w...@gmail.com>
Signed-off-by: Luke Nelson <luke.r.n...@gmail.com>
---
 .../selftests/bpf/verifier/basic_instr.c      | 85 +++++++++++++++++++
 1 file changed, 85 insertions(+)

diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c
index ed91a7b9a456..071dbc889e8c 100644
--- a/tools/testing/selftests/bpf/verifier/basic_instr.c
+++ b/tools/testing/selftests/bpf/verifier/basic_instr.c
@@ -90,6 +90,91 @@
        },
        .result = ACCEPT,
 },
+{
+       "lsh64 by 0 imm",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 1),
+       BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 0),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "rsh64 by 0 imm",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+       BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 0),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "arsh64 by 0 imm",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+       BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+       BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 0),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "lsh64 by 0 reg",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 1),
+       BPF_LD_IMM64(BPF_REG_2, 0),
+       BPF_ALU64_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
+       BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 1, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "rsh64 by 0 reg",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+       BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+       BPF_LD_IMM64(BPF_REG_3, 0),
+       BPF_ALU64_REG(BPF_RSH, BPF_REG_1, BPF_REG_3),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
+{
+       "arsh64 by 0 reg",
+       .insns = {
+       BPF_LD_IMM64(BPF_REG_0, 1),
+       BPF_LD_IMM64(BPF_REG_1, 0x100000000LL),
+       BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_1),
+       BPF_LD_IMM64(BPF_REG_3, 0),
+       BPF_ALU64_REG(BPF_ARSH, BPF_REG_1, BPF_REG_3),
+       BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
+       BPF_MOV64_IMM(BPF_REG_0, 2),
+       BPF_EXIT_INSN(),
+       },
+       .result = ACCEPT,
+       .retval = 1,
+},
 {
        "invalid 64-bit BPF_END",
        .insns = {
-- 
2.20.1