Add the emit_kasan_check() function that emits KASAN shadow memory
checks before memory accesses in JIT-compiled BPF programs. The
implementation relies on the existing __asan_{load,store}X functions
from KASAN subsystem. The helper:
- ensures that the KASAN instrumentation is actually needed: if the
  instruction being processed accesses the program stack, we skip the
  instrumentation, as those accesses are already protected with page
  guards
- saves registers. This includes caller-saved registers, but also
  temporary registers, as those were possibly used by the
  affected program
- computes the accessed address and stores it in %rdi
- calls the relevant function, depending on the instruction being a load
  or a store, and the size of the access.
- restores registers

The special care needed when inserting this instrumentation comes at the
cost of a non-negligible increase in JITed code size. For example, a
bare

  mov   0x0(%rsi),%rbx # Load into %rbx the content at the address in %rsi

becomes

  push    %rax
  push    %rcx
  push    %rdx
  push    %rsi
  push    %rdi
  push    %r8
  push    %r9
  push    %r10
  push    %r11
  sub     $0x8,%rsp
  mov     %rsi,%rdi
  call    0xffffffff81da0a60 <__asan_load8>
  add     $0x8,%rsp
  pop     %r11
  pop     %r10
  pop     %r9
  pop     %r8
  pop     %rdi
  pop     %rsi
  pop     %rdx
  pop     %rcx
  pop     %rax
  mov     0x0(%rsi),%rbx

Signed-off-by: Alexis LothorĂ© (eBPF Foundation) <[email protected]>
---
 arch/x86/net/bpf_jit_comp.c | 93 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 93 insertions(+)

diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index ea9e707e8abf..b90103bd0080 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -20,6 +20,10 @@
 #include <asm/unwind.h>
 #include <asm/cfi.h>
 
+#ifdef CONFIG_BPF_JIT_KASAN
+#include <linux/kasan.h>
+#endif
+
 static bool all_callee_regs_used[4] = {true, true, true, true};
 
 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
@@ -1301,6 +1305,95 @@ static void emit_store_stack_imm64(u8 **pprog, int reg, 
int stack_off, u64 imm64
        emit_stx(pprog, BPF_DW, BPF_REG_FP, reg, stack_off);
 }
 
+static int emit_kasan_check(u8 **pprog, u32 addr_reg, struct bpf_insn *insn,
+                           u8 *ip, bool accesses_stack)
+{
+#ifdef CONFIG_BPF_JIT_KASAN
+       bool is_write = BPF_CLASS(insn->code) == BPF_STX;
+       u32 bpf_size = BPF_SIZE(insn->code);
+       s32 off = insn->off;
+       u8 *prog = *pprog;
+       void *kasan_func;
+
+       if (accesses_stack) /* Stack accesses are already caught by guard pages; skip */
+               return 0;
+
+       /* Derive KASAN check function from access type and size */
+       switch (bpf_size) {
+       case BPF_B:
+               kasan_func = is_write ? __asan_store1 : __asan_load1;
+               break;
+       case BPF_H:
+               kasan_func = is_write ? __asan_store2 : __asan_load2;
+               break;
+       case BPF_W:
+               kasan_func = is_write ? __asan_store4 : __asan_load4;
+               break;
+       case BPF_DW:
+               kasan_func = is_write ? __asan_store8 : __asan_load8;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Save rax */
+       EMIT1(0x50);
+       /* Save rcx */
+       EMIT1(0x51);
+       /* Save rdx */
+       EMIT1(0x52);
+       /* Save rsi */
+       EMIT1(0x56);
+       /* Save rdi */
+       EMIT1(0x57);
+       /* Save r8 */
+       EMIT2(0x41, 0x50);
+       /* Save r9 */
+       EMIT2(0x41, 0x51);
+       /* Save r10 */
+       EMIT2(0x41, 0x52);
+       /* Save r11 */
+       EMIT2(0x41, 0x53);
+       /* We have pushed 72 bytes, realign stack to 16 bytes: sub rsp, 8 */
+       EMIT4(0x48, 0x83, 0xEC, 8);
+
+       /* mov rdi, addr_reg */
+       EMIT_mov(BPF_REG_1, addr_reg);
+
+       /* add rdi, off (if offset is non-zero) */
+       if (off) {
+               if (is_imm8(off)) {
+                       /* add rdi, imm8 */
+                       EMIT4(0x48, 0x83, 0xC7, (u8)off);
+               } else {
+                       /* add rdi, imm32 */
+                       EMIT3_off32(0x48, 0x81, 0xC7, off);
+               }
+       }
+
+       /* Adjust ip to account for the instrumentation generated so far */
+       ip += (prog - *pprog);
+       /* call kasan_func */
+       if (emit_call(&prog, kasan_func, ip))
+               return -ERANGE;
+
+       /* Restore registers, in reverse order of the saves above */
+       EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8: undo the realignment */
+       EMIT2(0x41, 0x5B); /* pop r11 */
+       EMIT2(0x41, 0x5A); /* pop r10 */
+       EMIT2(0x41, 0x59); /* pop r9 */
+       EMIT2(0x41, 0x58); /* pop r8 */
+       EMIT1(0x5F); /* pop rdi */
+       EMIT1(0x5E); /* pop rsi */
+       EMIT1(0x5A); /* pop rdx */
+       EMIT1(0x59); /* pop rcx */
+       EMIT1(0x58); /* pop rax */
+
+       *pprog = prog;
+#endif /* CONFIG_BPF_JIT_KASAN */
+       return 0;
+}
+
 static int emit_atomic_rmw(u8 **pprog, u32 atomic_op,
                           u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
 {

-- 
2.53.0


Reply via email to