Adding support to optimize uprobes placed on top of instructions that
can be emulated.

The idea is to store the instructions from the underlying 5 bytes and
emulate them during int3 handling and during the uprobe syscall:

  - install the 'call trampoline' through the standard int3 update
  - if int3 is hit before the optimization is finished, emulate
    all underlying instructions
  - once the call is installed, the uprobe syscall emulates
    all underlying instructions (see the loop sketched below)
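
Both paths end up replaying every instruction that was decoded at
probe setup time. Simplified from the x86 code added below:

  /* replay all original instructions under the 5-byte window */
  for (i = 0; i < auprobe->opt.cnt; i++)
          auprobe->opt.xol[i].ops->emulate(auprobe, &auprobe->opt.xol[i], regs);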

Adding opt_xol_ops that emulates the instructions replaced by the
5-byte call instruction used to optimize the uprobe.
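
At setup time, opt_setup_xol_ops() keeps decoding instructions at
increasing offsets until the 5 bytes overwritten by the call are fully
covered, storing per-instruction xol ops along the way. A minimal
sketch of that decode loop (error handling condensed):

  offset = insn->length;
  for (i = 1; i < 5 && offset < 5; i++) {
          if (uprobe_init_insn_offset(auprobe, offset, &insnX, true) ||
              opt_setup_xol_insns(auprobe, &auprobe->opt.xol[i], &insnX))
                  return -ENOSYS;
          offset += insnX.length;
          auprobe->opt.cnt++;
  }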

Signed-off-by: Jiri Olsa <[email protected]>
---
 arch/x86/include/asm/uprobes.h |  13 ++--
 arch/x86/kernel/uprobes.c      | 106 ++++++++++++++++++++++++++++++++-
 include/linux/uprobes.h        |   1 +
 kernel/events/uprobes.c        |   6 ++
 4 files changed, 120 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index e09aab82b8c1..eaa80dc1c836 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -21,8 +21,9 @@ typedef u8 uprobe_opcode_t;
 #define UPROBE_SWBP_INSN_SIZE             1
 
 enum {
-       ARCH_UPROBE_FLAG_CAN_OPTIMIZE   = 0,
-       ARCH_UPROBE_FLAG_OPTIMIZE_FAIL  = 1,
+       ARCH_UPROBE_FLAG_CAN_OPTIMIZE     = 0,
+       ARCH_UPROBE_FLAG_OPTIMIZE_FAIL    = 1,
+       ARCH_UPROBE_FLAG_OPTIMIZE_EMULATE = 2,
 };
 
 struct uprobe_xol_ops;
@@ -59,11 +60,15 @@ struct arch_uprobe_xol {
 
 struct arch_uprobe {
        union {
-               u8                      insn[MAX_UINSN_BYTES];
+               u8                      insn[5*MAX_UINSN_BYTES];
                u8                      ixol[MAX_UINSN_BYTES];
        };
 
-       struct arch_uprobe_xol          xol;
+       struct arch_uprobe_xol  xol;
+       struct {
+               struct arch_uprobe_xol  xol[5];
+               int                     cnt;
+       } opt;
 
        unsigned long flags;
 };
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 904c423ea81d..7f3f537a6425 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -277,13 +277,14 @@ static bool is_prefix_bad(struct insn *insn)
        return false;
 }
 
-static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
+static int uprobe_init_insn_offset(struct arch_uprobe *auprobe, unsigned long offset,
+                                  struct insn *insn, bool x86_64)
 {
        enum insn_mode m = x86_64 ? INSN_MODE_64 : INSN_MODE_32;
        u32 volatile *good_insns;
        int ret;
 
-       ret = insn_decode(insn, auprobe->insn, sizeof(auprobe->insn), m);
+       ret = insn_decode(insn, auprobe->insn + offset, sizeof(auprobe->insn) - offset, m);
        if (ret < 0)
                return -ENOEXEC;
 
@@ -310,6 +311,11 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
        return -ENOTSUPP;
 }
 
+static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64)
+{
+       return uprobe_init_insn_offset(auprobe, 0, insn, x86_64);
+}
+
 #ifdef CONFIG_X86_64
 
 struct uretprobe_syscall_args {
@@ -1462,6 +1468,23 @@ static bool sub_emulate_op(struct arch_uprobe *auprobe, struct arch_uprobe_xol *
 
 #undef EFLAGS_MASK
 
+static bool optimized_emulate(struct arch_uprobe *auprobe, struct arch_uprobe_xol *xol,
+                             struct pt_regs *regs)
+{
+       int i;
+
+       for (i = 0; i < auprobe->opt.cnt; i++) {
+               WARN_ON(!auprobe->opt.xol[i].ops->emulate(auprobe, &auprobe->opt.xol[i], regs));
+       }
+       return true;
+}
+
+void arch_uprobe_optimized_emulate(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+       if (test_bit(ARCH_UPROBE_FLAG_OPTIMIZE_EMULATE, &auprobe->flags))
+               optimized_emulate(auprobe, NULL, regs);
+}
+
 static const struct uprobe_xol_ops branch_xol_ops = {
        .emulate  = branch_emulate_op,
        .post_xol = branch_post_xol_op,
@@ -1479,6 +1502,10 @@ static const struct uprobe_xol_ops sub_xol_ops = {
        .emulate  = sub_emulate_op,
 };
 
+static const struct uprobe_xol_ops opt_xol_ops = {
+       .emulate  = optimized_emulate,
+};
+
 /* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
 static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
 {
@@ -1675,6 +1702,73 @@ static int sub_setup_xol_ops(struct arch_uprobe_xol *xol, struct insn *insn)
        xol->ops = &sub_xol_ops;
        return 0;
 }
+
+static int opt_setup_xol_insns(struct arch_uprobe *auprobe, struct arch_uprobe_xol *xol,
+                              struct insn *insn)
+{
+       int ret;
+
+       /*
+        * TODO somehow separate nop emulation out of branch_xol_ops,
+        * so we could emulate nop instructions in here.
+        */
+       ret = push_setup_xol_ops(xol, insn);
+       if (ret != -ENOSYS)
+               return ret;
+       ret = mov_setup_xol_ops(xol, insn);
+       if (ret != -ENOSYS)
+               return ret;
+       ret = sub_setup_xol_ops(xol, insn);
+       if (ret != -ENOSYS)
+               return ret;
+
+       return -1;
+}
+
+static int opt_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+{
+       unsigned long offset = insn->length;
+       struct insn insnX;
+       int i, ret;
+
+       if (test_bit(ARCH_UPROBE_FLAG_CAN_OPTIMIZE, &auprobe->flags))
+               return -ENOSYS;
+
+       ret = opt_setup_xol_insns(auprobe, &auprobe->opt.xol[0], insn);
+       if (ret)
+               return -ENOSYS;
+
+       auprobe->opt.cnt = 1;
+       if (offset >= 5)
+               goto optimize;
+
+       for (i = 1; i < 5; i++) {
+               ret = uprobe_init_insn_offset(auprobe, offset, &insnX, true);
+               if (ret)
+                       break;
+               ret = opt_setup_xol_insns(auprobe, &auprobe->opt.xol[i], &insnX);
+               if (ret)
+                       break;
+               offset += insnX.length;
+               auprobe->opt.cnt++;
+               if (offset >= 5)
+                       goto optimize;
+       }
+
+       return -ENOSYS;
+
+optimize:
+       set_bit(ARCH_UPROBE_FLAG_CAN_OPTIMIZE, &auprobe->flags);
+       set_bit(ARCH_UPROBE_FLAG_OPTIMIZE_EMULATE, &auprobe->flags);
+       auprobe->xol.ops = &opt_xol_ops;
+
+       /*
+        * TODO perhaps we could 'emulate' nop, so there would be no need for
+        * ARCH_UPROBE_FLAG_OPTIMIZE_EMULATE flag, because we would emulate
+        * always.
+        */
+       return 0;
+}
 #else
 static int mov_setup_xol_ops(struct arch_uprobe_xol *xol, struct insn *insn)
 {
@@ -1684,6 +1778,10 @@ static int sub_setup_xol_ops(struct arch_uprobe_xol *xol, struct insn *insn)
 {
        return -ENOSYS;
 }
+static int opt_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+{
+       return -ENOSYS;
+}
 #endif
 
 /**
@@ -1706,6 +1804,10 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
        if (can_optimize(&insn, addr))
                set_bit(ARCH_UPROBE_FLAG_CAN_OPTIMIZE, &auprobe->flags);
 
+       ret = opt_setup_xol_ops(auprobe, &insn);
+       if (ret != -ENOSYS)
+               return ret;
+
        ret = branch_setup_xol_ops(auprobe, &insn);
        if (ret != -ENOSYS)
                return ret;
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index ee3d36eda45d..4b9f81ad8316 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -242,6 +242,7 @@ extern void arch_uprobe_clear_state(struct mm_struct *mm);
 extern void arch_uprobe_init_state(struct mm_struct *mm);
extern void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr);
extern void arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr);
+extern void arch_uprobe_optimized_emulate(struct arch_uprobe *auprobe, struct pt_regs *regs);
 #else /* !CONFIG_UPROBES */
 struct uprobes_state {
 };
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f11ceb8be8c4..dd893030e32e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -2701,6 +2701,10 @@ void __weak arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vadd
 {
 }
 
+void __weak arch_uprobe_optimized_emulate(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+}
+
 /*
  * Run handler and ask thread to singlestep.
  * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
@@ -2801,6 +2805,8 @@ void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr)
        if (arch_uprobe_ignore(&uprobe->arch, regs))
                return;
        handler_chain(uprobe, regs);
+
+       arch_uprobe_optimized_emulate(&uprobe->arch, regs);
 }
 
 /*
-- 
2.51.1

