On Wed, Oct 09, 2019 at 03:07:54PM +0200, Peter Zijlstra wrote:
> So from what I can tell of kernel/kprobes.c, what it does is something like:
> 
> ARM: (__arm_kprobe)
>       text_poke(INT3)
>       /* guarantees nothing, INT3 will become visible at some point, maybe */
> 
>      (kprobe_optimizer)
>       if (opt) {
>               /* guarantees the bytes after INT3 are unused */
>               synchronize_rcu_tasks();
>               text_poke_bp(JMP32);
>               /* implies IPI-sync, kprobe really is enabled */
>       }
> 
> 
> DISARM: (__unregister_kprobe_top)
>       if (opt) {
>               text_poke_bp(INT3 + tail);
>               /* implies IPI-sync, so tail is guaranteed visible */
>       }
>       text_poke(old);
> 
> 
> FREE: (__unregister_kprobe_bottom)
>       /* guarantees 'old' is visible and the kprobe really is unused, maybe */
>       synchronize_rcu();
>       free();
> 
> 
> Now the problem is that I don't think the synchronize_rcu() at free
> implies enough to guarantee 'old' really is visible on all CPUs.
> Similarly, I don't think synchronize_rcu_tasks() is sufficient on the
> ARM side either. It only provides the guarantee -provided- the INT3 is
> actually visible. If it is not, all bets are off.
> 
> I'd feel much better if we switch arch_arm_kprobe() over to using
> text_poke_bp(). Or at the very least add the on_each_cpu(do_sync_core)
> to it.
> 
> Hmm?
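
Something like this, roughly (a sketch only, using the INT3_INSN_OPCODE
spelling from the branch below; note do_sync_core() is static to
alternative.c, which is exactly why the patch adds a text_poke_sync()
wrapper rather than open-coding the IPI):

        void arch_arm_kprobe(struct kprobe *p)
        {
                text_poke(p->addr, ((unsigned char []){ INT3_INSN_OPCODE }), 1);
                /* IPI all CPUs through sync_core(); INT3 guaranteed visible. */
                on_each_cpu(do_sync_core, NULL, 1);
        }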

So I suppose I'm suggesting we do something like the below on top of
what I already have here:

  git://git.kernel.org/pub/scm/linux/kernel/git/peterz/queue.git x86/ftrace

All it needs is a few comments ;-) Also note how this nicely gets rid
of the one text_poke_bp(.emulate) user, so we can go and remove that as
well.
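
The rule being enforced is the same throughout (restating the patch's
semantics, nothing new): a bare text_poke() makes no visibility
guarantee whatsoever, so nothing may depend on the new bytes until
every CPU has been forced through a serializing sync_core():

        text_poke(addr, opcode, len);   /* bytes written; visibility unordered */
        text_poke_sync();               /* sync_core() IPI on all CPUs; bytes visible */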

---
 arch/x86/include/asm/text-patching.h |  1 +
 arch/x86/kernel/alternative.c        | 11 ++++++++---
 arch/x86/kernel/kprobes/core.c       |  1 +
 arch/x86/kernel/kprobes/opt.c        | 12 ++++--------
 4 files changed, 14 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index d553175212b3..d3269558e5b5 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -42,6 +42,7 @@ extern void text_poke_early(void *addr, const void *opcode, size_t len);
  * an inconsistent instruction while you patch.
  */
 extern void *text_poke(void *addr, const void *opcode, size_t len);
+extern void text_poke_sync(void);
 extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
 extern int poke_int3_handler(struct pt_regs *regs);
 extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 34a08bc68e9a..9e81ab542190 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -936,6 +936,11 @@ static void do_sync_core(void *info)
        sync_core();
 }
 
+void text_poke_sync(void)
+{
+       on_each_cpu(do_sync_core, NULL, 1);
+}
+
 struct text_poke_loc {
        s32 rel_addr; /* addr := _stext + rel_addr */
        s32 rel32;
@@ -1089,7 +1094,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
        for (i = 0; i < nr_entries; i++)
                text_poke(text_poke_addr(&tp[i]), &int3, sizeof(int3));
 
-       on_each_cpu(do_sync_core, NULL, 1);
+       text_poke_sync();
 
        /*
         * Second step: update all but the first byte of the patched range.
@@ -1111,7 +1116,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
                 * not necessary and we'd be safe even without it. But
                 * better safe than sorry (plus there's not only Intel).
                 */
-               on_each_cpu(do_sync_core, NULL, 1);
+               text_poke_sync();
        }
 
        /*
@@ -1127,7 +1132,7 @@ static void text_poke_bp_batch(struct text_poke_loc *tp, unsigned int nr_entries
        }
 
        if (do_sync)
-               on_each_cpu(do_sync_core, NULL, 1);
+               text_poke_sync();
 
        /*
         * sync_core() implies an smp_mb() and orders this store against
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 7e4a8a1c9d9a..8ae5170207b2 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -503,6 +503,7 @@ void arch_arm_kprobe(struct kprobe *p)
 void arch_disarm_kprobe(struct kprobe *p)
 {
        text_poke(p->addr, &p->opcode, 1);
+       text_poke_sync();
 }
 
 void arch_remove_kprobe(struct kprobe *p)
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 36d7249f2145..30a2646cfc8a 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -446,14 +446,10 @@ void arch_optimize_kprobes(struct list_head *oplist)
 /* Replace a relative jump with a breakpoint (int3).  */
 void arch_unoptimize_kprobe(struct optimized_kprobe *op)
 {
-       u8 insn_buff[JMP32_INSN_SIZE];
-
-       /* Set int3 to first byte for kprobes */
-       insn_buff[0] = INT3_INSN_OPCODE;
-       memcpy(insn_buff + 1, op->optinsn.copied_insn, DISP32_SIZE);
-
-       text_poke_bp(op->kp.addr, insn_buff, JMP32_INSN_SIZE,
-                    text_gen_insn(JMP32_INSN_OPCODE, op->kp.addr, op->optinsn.insn));
+       arch_arm_kprobe(&op->kp);
+       text_poke(op->kp.addr + INT3_INSN_SIZE,
+                 op->optinsn.copied_insn, DISP32_SIZE);
+       text_poke_sync();
 }
 
 /*
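
With that, the unoptimize path reads just like the pseudo-code from the
quoted analysis above:

        DISARM: (arch_unoptimize_kprobe)
                text_poke(INT3)         /* via arch_arm_kprobe() */
                text_poke(tail)         /* restore the displaced bytes after INT3 */
                text_poke_sync()        /* IPI-sync; INT3 and tail guaranteed visible */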
