Adding support to emulate 'sub reg, imm' instructions, because they are
often part of the function prologue.
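
For illustration, the kind of prologue this targets typically disassembles
as below (function name, addresses and the immediate are made up for the
example; only the encoding matters):

  0000000000001130 <foo>:
    1130:	55                   	push   %rbp
    1131:	48 89 e5             	mov    %rsp,%rbp
    1134:	48 81 ec 20 01 00 00 	sub    $0x120,%rsp

A uprobe placed on the last instruction (REX.W prefix 0x48, opcode 0x81,
ModRM reg field 5) can now be emulated instead of being single-stepped
out of line.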
Signed-off-by: Jiri Olsa <[email protected]>
---
 arch/x86/include/asm/uprobes.h |  5 +++
 arch/x86/kernel/uprobes.c      | 73 ++++++++++++++++++++++++++++++++++
 2 files changed, 78 insertions(+)

diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index e6fd87a1cbc3..e09aab82b8c1 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -49,6 +49,11 @@ struct arch_uprobe_xol {
 			u16	dst;	/* to the start of pt_regs */
 			u8	ilen;
 		} mov;
+		struct {
+			s32	val;
+			u16	reg;	/* to the start of pt_regs */
+			u8	ilen;
+		} sub;
 	};
 };
 
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 5c44c4b84e99..904c423ea81d 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -1428,6 +1428,40 @@ static bool mov_emulate_op(struct arch_uprobe *auprobe, struct arch_uprobe_xol *
 	return true;
 }
 
+#define EFLAGS_MASK	(X86_EFLAGS_OF|X86_EFLAGS_SF|X86_EFLAGS_ZF|X86_EFLAGS_AF|\
+			 X86_EFLAGS_PF|X86_EFLAGS_CF)
+
+static bool sub_emulate_op(struct arch_uprobe *auprobe, struct arch_uprobe_xol *xol,
+			   struct pt_regs *regs)
+{
+	unsigned long dst, flags = regs->flags, val = xol->sub.val;
+	unsigned long *reg = (void *) regs + xol->sub.reg;
+
+	dst = *reg;
+
+	/*
+	 * Emulate sub with 'sub reg,reg' and get result value and
+	 * flags register change. Not sure it's completely equivalent
+	 * to sub reg,imm so perhaps there's better way.
+	 */
+	asm volatile (
+		"pushf			\n\t"
+		"push %[flags]; popf	\n\t"
+		"subq %[src], %[dst]	\n\t"
+		"pushf; popq %[flags]	\n\t"
+		"popf			\n\t"
+		: [flags] "+D" (flags), [dst] "+r" (dst)
+		: [src] "r" (val)
+	);
+
+	*reg = dst;
+	regs->flags = (regs->flags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
+	regs->ip += xol->sub.ilen;
+	return true;
+}
+
+#undef EFLAGS_MASK
+
 static const struct uprobe_xol_ops branch_xol_ops = {
 	.emulate	= branch_emulate_op,
 	.post_xol	= branch_post_xol_op,
@@ -1441,6 +1475,10 @@ static const struct uprobe_xol_ops mov_xol_ops = {
 	.emulate	= mov_emulate_op,
 };
 
+static const struct uprobe_xol_ops sub_xol_ops = {
+	.emulate	= sub_emulate_op,
+};
+
 /* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
 static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
 {
@@ -1610,11 +1648,42 @@ static int mov_setup_xol_ops(struct arch_uprobe_xol *xol, struct insn *insn)
 	xol->ops = &mov_xol_ops;
 	return 0;
 }
+
+static int sub_setup_xol_ops(struct arch_uprobe_xol *xol, struct insn *insn)
+{
+	u8 opc1 = OPCODE1(insn);
+	int off;
+
+	if (opc1 != 0x81)
+		return -ENOSYS;
+	if (insn->rex_prefix.nbytes != 1 ||
+	    insn->rex_prefix.bytes[0] != 0x48)
+		return -ENOSYS;
+	if (X86_MODRM_MOD(insn->modrm.value) != 3)
+		return -ENOSYS;
+	if (X86_MODRM_REG(insn->modrm.value) != 5)
+		return -ENOSYS;
+
+	/* get register offset */
+	off = insn_get_modrm_rm_off(insn);
+	if (off < 0)
+		return off;
+
+	xol->sub.reg = off;
+	xol->sub.val = insn->immediate.value;
+	xol->sub.ilen = insn->length;
+	xol->ops = &sub_xol_ops;
+	return 0;
+}
 #else
 static int mov_setup_xol_ops(struct arch_uprobe_xol *xol, struct insn *insn)
 {
 	return -ENOSYS;
 }
+static int sub_setup_xol_ops(struct arch_uprobe_xol *xol, struct insn *insn)
+{
+	return -ENOSYS;
+}
 #endif
 
 /**
@@ -1649,6 +1718,10 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	if (ret != -ENOSYS)
 		return ret;
 
+	ret = sub_setup_xol_ops(&auprobe->xol, &insn);
+	if (ret != -ENOSYS)
+		return ret;
+
 	/*
 	 * Figure out which fixups default_post_xol_op() will need to perform,
 	 * and annotate defparam->fixups accordingly.
-- 
2.51.1
