When we shift by one, we can use a different encoding where imm
is not explicitly needed, which saves 1 byte per such op.

Signed-off-by: Daniel Borkmann <dan...@iogearbox.net>
---
 arch/x86/net/bpf_jit_comp.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 4923d92..4bc36bd 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -640,7 +640,11 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                        case BPF_RSH: b3 = 0xE8; break;
                        case BPF_ARSH: b3 = 0xF8; break;
                        }
-                       EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
+
+                       if (imm32 == 1)
+                               EMIT2(0xD1, add_1reg(b3, dst_reg));
+                       else
+                               EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
                        break;
 
                case BPF_ALU | BPF_LSH | BPF_X:
-- 
2.9.5

Reply via email to