Set the kprobes instruction buffer memory back to read-only (RO)
on the failure path. Without this fix, if an unexpected error
occurs while copying instructions, the kprobes instruction buffer
is left writable (RW), which can allow unexpected modification of
the instruction buffer.

Fixes: d0381c81c2f7 ("kprobes/x86: Set kprobes pages read-only")
Signed-off-by: Masami Hiramatsu <mhira...@kernel.org>
---
  Changes in v2:
   - Use a helper variable instead of using p->ainsn.insn directly.
---
 arch/x86/kernel/kprobes/core.c |   15 +++++++++------
 arch/x86/kernel/kprobes/opt.c  |    1 +
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f0153714ddac..5d8194b9a068 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -428,15 +428,18 @@ void free_insn_page(void *page)
 
 static int arch_copy_kprobe(struct kprobe *p)
 {
+       kprobe_opcode_t *buf = p->ainsn.insn;
        struct insn insn;
        int len;
 
-       set_memory_rw((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
+       set_memory_rw((unsigned long)buf & PAGE_MASK, 1);
 
        /* Copy an instruction with recovering if other optprobe modifies it.*/
-       len = __copy_instruction(p->ainsn.insn, p->addr, &insn);
-       if (!len)
+       len = __copy_instruction(buf, p->addr, &insn);
+       if (!len) {
+               set_memory_ro((unsigned long)buf & PAGE_MASK, 1);
                return -EINVAL;
+       }
 
        /*
         * __copy_instruction can modify the displacement of the instruction,
@@ -444,13 +447,13 @@ static int arch_copy_kprobe(struct kprobe *p)
         */
        prepare_boost(p, &insn);
 
-       set_memory_ro((unsigned long)p->ainsn.insn & PAGE_MASK, 1);
+       set_memory_ro((unsigned long)buf & PAGE_MASK, 1);
 
        /* Check whether the instruction modifies Interrupt Flag or not */
-       p->ainsn.if_modifier = is_IF_modifier(p->ainsn.insn);
+       p->ainsn.if_modifier = is_IF_modifier(buf);
 
        /* Also, displacement change doesn't affect the first byte */
-       p->opcode = p->ainsn.insn[0];
+       p->opcode = buf[0];
 
        return 0;
 }
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 4f98aad38237..86b9a883b712 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -371,6 +371,7 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op,
        ret = copy_optimized_instructions(buf + TMPL_END_IDX, op->kp.addr);
        if (ret < 0) {
                __arch_remove_optimized_kprobe(op, 0);
+               set_memory_ro((unsigned long)buf & PAGE_MASK, 1);
                return ret;
        }
        op->optinsn.size = ret;

Reply via email to