Re: [PATCH v12 05/10] arm64: Kprobes with single stepping support

2016-05-31 Thread David Long

On 05/17/2016 05:10 AM, Huang Shijie wrote:

On Wed, Apr 27, 2016 at 02:53:00PM -0400, David Long wrote:

From: Sandeepa Prabhu 
+
+static bool __kprobes aarch64_insn_is_steppable(u32 insn)


Could we add more comments to this function? The comment could explain
which types of instructions are steppable and which are not.


+{
+   if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) {
+   if (aarch64_insn_is_branch(insn) ||
+   aarch64_insn_is_msr_imm(insn) ||
+   aarch64_insn_is_msr_reg(insn) ||
+   aarch64_insn_is_exception(insn))
+   return false;
+
+   if (aarch64_insn_is_mrs(insn))
+   return aarch64_insn_extract_system_reg(insn)
+   != AARCH64_INSN_SPCLREG_DAIF;
+
+   if (aarch64_insn_is_hint(insn))
+   return aarch64_insn_is_nop(insn);
+
+   return true;
+   }
+
+   if (aarch64_insn_uses_literal(insn) ||
+   aarch64_insn_is_exclusive(insn))
+   return false;
+
+   return true;


Thanks
Huang Shijie



I did add a comment to this for the next version of the patch.

-dl
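
For reference, the requested header might look roughly like the sketch
below. The wording is derived from the code quoted above and is
illustrative only, not the comment that actually landed in v13:

/*
 * aarch64_insn_is_steppable() - decide whether an instruction can be
 * single-stepped safely from the out-of-line slot.
 *
 * In the branch/system class, branch, exception-generating and MSR
 * instructions are not steppable; MRS is steppable except for reads
 * of the DAIF flags, and of the hint instructions only NOP is
 * allowed. Outside that class, instructions that use PC-relative
 * literals and exclusive load/store instructions are rejected;
 * everything else is steppable.
 */
static bool __kprobes aarch64_insn_is_steppable(u32 insn)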



Re: [PATCH v12 05/10] arm64: Kprobes with single stepping support

2016-05-26 Thread David Long

On 05/17/2016 11:29 PM, Masami Hiramatsu wrote:

On Tue, 17 May 2016 16:58:09 +0800
Huang Shijie  wrote:


On Wed, Apr 27, 2016 at 02:53:00PM -0400, David Long wrote:

+
+/*
+ * Interrupts need to be disabled before single-step mode is set, and not
+ * reenabled until after single-step mode ends.
+ * Without disabling interrupt on local CPU, there is a chance of
+ * interrupt occurrence in the period of exception return and  start of
+ * out-of-line single-step, that result in wrongly single stepping
+ * into the interrupt handler.
+ */
+static void __kprobes kprobes_save_local_irqflag(struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();


Why not add a parameter to this function to pass in the @kcb?


Good catch; it should use the same kcb as the caller.



I've made the change for the next version of the patch.
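
For illustration, the reworked helpers would simply take the caller's kcb
instead of re-deriving it. A rough sketch of the suggested change (the
exact form in the next version may differ):

static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						 struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate;
	regs->pstate |= PSR_I_BIT;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						    struct pt_regs *regs)
{
	if (kcb->saved_irqflag & PSR_I_BIT)
		regs->pstate |= PSR_I_BIT;
	else
		regs->pstate &= ~PSR_I_BIT;
}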




+
+ kcb->saved_irqflag = regs->pstate;
+ regs->pstate |= PSR_I_BIT;
+}
+
+static void __kprobes kprobes_restore_local_irqflag(struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

ditto.



I've made the change.


+
+ if (kcb->saved_irqflag & PSR_I_BIT)
+ regs->pstate |= PSR_I_BIT;
+ else
+ regs->pstate &= ~PSR_I_BIT;
+}
+
+static void __kprobes
+set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
+{
+ kcb->ss_ctx.ss_pending = true;
+ kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
+}
+
+static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
+{
+ kcb->ss_ctx.ss_pending = false;
+ kcb->ss_ctx.match_addr = 0;
+}
+
+static void __kprobes setup_singlestep(struct kprobe *p,
+struct pt_regs *regs,
+struct kprobe_ctlblk *kcb, int reenter)
+{
+ unsigned long slot;
+
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ if (p->ainsn.insn) {
+ /* prepare for single stepping */
+ slot = (unsigned long)p->ainsn.insn;
+
+ set_ss_context(kcb, slot);  /* mark pending ss */
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ spsr_set_debug_flag(regs, 0);
+
+ /* IRQs and single stepping do not mix well. */
+ kprobes_save_local_irqflag(regs);
+ kernel_enable_single_step(regs);
+ instruction_pointer(regs) = slot;
+ } else  {
+ BUG();


You'd better use BUG_ON(!p->ainsn.insn);



I have that change ready, but the BUG*() call is removed entirely in
patch 07/10 and the indentation changes back to the above, resulting in
more diffs and the same final code.
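
The suggested form would hoist the assertion ahead of the work, roughly
like this (a sketch only; as noted, the BUG*() call disappears again in
patch 07/10):

	BUG_ON(!p->ainsn.insn);

	/* prepare for single stepping */
	slot = (unsigned long)p->ainsn.insn;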



+ }
+}
+
+static int __kprobes reenter_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_ACTIVE:
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs, kcb, 1);
+ break;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
+ dump_kprobe(p);
+ BUG();
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+
+ if (!cur)
+ return;
+
+ /* return addr restore if non-branching insn */
+ if (cur->ainsn.restore.type == RESTORE_PC) {
+ instruction_pointer(regs) = cur->ainsn.restore.addr;
+ if (!instruction_pointer(regs))
+ BUG();
+ }
+
+ /* restore back original saved kprobe variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ return;
+ }
+ /* call post handler */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler)  {
+ /* post_handler can hit breakpoint and single step
+  * again, so we enable D-flag for recursive exception.
+  */
+ cur->post_handler(cur, regs, 0);
+ }
+
+ reset_current_kprobe();
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+  * We are here because the instruction being single
+  * stepped caused a page fault. We reset the current
+  * kprobe and the ip points back to the probe address
+  * and allow the page fault handler to continue as a
+  * normal page fault.
+  */

Re: [PATCH v12 05/10] arm64: Kprobes with single stepping support

2016-05-26 Thread David Long

On 05/17/2016 04:58 AM, Huang Shijie wrote:

On Wed, Apr 27, 2016 at 02:53:00PM -0400, David Long wrote:

+
+/*
+ * Interrupts need to be disabled before single-step mode is set, and not
+ * reenabled until after single-step mode ends.
+ * Without disabling interrupt on local CPU, there is a chance of
+ * interrupt occurrence in the period of exception return and  start of
+ * out-of-line single-step, that result in wrongly single stepping
+ * into the interrupt handler.
+ */
+static void __kprobes kprobes_save_local_irqflag(struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();


Why not add a parameter to this function to pass in the @kcb?


+
+ kcb->saved_irqflag = regs->pstate;
+ regs->pstate |= PSR_I_BIT;
+}
+
+static void __kprobes kprobes_restore_local_irqflag(struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

ditto.


+
+ if (kcb->saved_irqflag & PSR_I_BIT)
+ regs->pstate |= PSR_I_BIT;
+ else
+ regs->pstate &= ~PSR_I_BIT;
+}
+
+static void __kprobes
+set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
+{
+ kcb->ss_ctx.ss_pending = true;
+ kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
+}
+
+static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
+{
+ kcb->ss_ctx.ss_pending = false;
+ kcb->ss_ctx.match_addr = 0;
+}
+
+static void __kprobes setup_singlestep(struct kprobe *p,
+struct pt_regs *regs,
+struct kprobe_ctlblk *kcb, int reenter)
+{
+ unsigned long slot;
+
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ if (p->ainsn.insn) {
+ /* prepare for single stepping */
+ slot = (unsigned long)p->ainsn.insn;
+
+ set_ss_context(kcb, slot);  /* mark pending ss */
+
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ spsr_set_debug_flag(regs, 0);
+
+ /* IRQs and single stepping do not mix well. */
+ kprobes_save_local_irqflag(regs);
+ kernel_enable_single_step(regs);
+ instruction_pointer(regs) = slot;
+ } else  {
+ BUG();
+ }
+}
+
+static int __kprobes reenter_kprobe(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_ACTIVE:
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs, kcb, 1);
+ break;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
+ dump_kprobe(p);
+ BUG();
+ break;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void __kprobes
+post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+
+ if (!cur)
+ return;
+
+ /* return addr restore if non-branching insn */
+ if (cur->ainsn.restore.type == RESTORE_PC) {
+ instruction_pointer(regs) = cur->ainsn.restore.addr;
+ if (!instruction_pointer(regs))
+ BUG();
+ }
+
+ /* restore back original saved kprobe variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ return;
+ }
+ /* call post handler */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler)  {
+ /* post_handler can hit breakpoint and single step
+  * again, so we enable D-flag for recursive exception.
+  */
+ cur->post_handler(cur, regs, 0);
+ }
+
+ reset_current_kprobe();
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+  * We are here because the instruction being single
+  * stepped caused a page fault. We reset the current
+  * kprobe and the ip points back to the probe address
+  * and allow the page fault handler to continue as a
+  * normal page fault.
+  */
+ instruction_pointer(regs) = (unsigned long)cur->addr;
+ if (!instruction_pointer(regs))
+ BUG();
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+
+

Re: [PATCH v12 05/10] arm64: Kprobes with single stepping support

2016-05-19 Thread David Long

On 05/12/2016 11:01 AM, James Morse wrote:

Hi David, Sandeepa,

On 27/04/16 19:53, David Long wrote:

From: Sandeepa Prabhu 



diff --git a/arch/arm64/kernel/kprobes.c b/arch/arm64/kernel/kprobes.c
new file mode 100644
index 000..dfa1b1f
--- /dev/null
+++ b/arch/arm64/kernel/kprobes.c
@@ -0,0 +1,520 @@
+/*
+ * arch/arm64/kernel/kprobes.c
+ *
+ * Kprobes support for ARM64
+ *
+ * Copyright (C) 2013 Linaro Limited.
+ * Author: Sandeepa Prabhu 
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ */
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include "kprobes-arm64.h"
+
+#define MIN_STACK_SIZE(addr)   min((unsigned long)MAX_STACK_SIZE,  \
+   (unsigned long)current_thread_info() + THREAD_START_SP - (addr))


What if we probe something called on the irq stack?
This needs the on_irq_stack() checks too, the start/end can be found from the
per-cpu irq_stack value.

[ ... ]



OK.
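
One possible shape for an irq-stack-aware replacement, assuming the
existing on_irq_stack() and IRQ_STACK_PTR() helpers (a sketch of the
idea only, not the code that was eventually posted):

static unsigned long min_stack_size(unsigned long addr)
{
	unsigned long size;

	/* Bound the copy by whichever stack the probed code is on. */
	if (on_irq_stack(addr, raw_smp_processor_id()))
		size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
	else
		size = (unsigned long)current_thread_info()
		       + THREAD_START_SP - addr;

	return min(size, (unsigned long)MAX_STACK_SIZE);
}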


+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+   struct jprobe *jp = container_of(p, struct jprobe, kp);
+   struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+   long stack_ptr = kernel_stack_pointer(regs);
+
+   kcb->jprobe_saved_regs = *regs;
+   memcpy(kcb->jprobes_stack, (void *)stack_ptr,
+  MIN_STACK_SIZE(stack_ptr));


I wonder if we need this stack save/restore?

The comment next to the equivalent code for x86 says:

gcc assumes that the callee owns the argument space and could overwrite it,
e.g. tailcall optimization. So, to be absolutely safe we also save and
restore enough stack bytes to cover the argument area.


On arm64 the first eight arguments are passed in registers, so we might not need
this stack copy. (sparc and powerpc work like this too, their versions of this
function don't copy chunks of the stack).

... then I went looking for functions with >8 arguments...

Looking at the arm64 defconfig dwarf debug data, there are 71 of these that
don't get inlined, picking at random:

rockchip_clk_register_pll() has 13
fib_dump_info() has 11
vma_merge() has 10
vring_create_virtqueue() has 10

etc...

So we do need this stack copying, so that we can probe these functions without
risking the arguments being modified.

It may be worth including a comment to the effect that this stack save/restore
is needed for functions that pass >8 arguments where the pre-handler may change
these values on the stack.




I can add a comment.
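
Something along these lines, for example (illustrative wording only):

	/*
	 * The first eight arguments are passed in registers on arm64,
	 * but anything beyond that is spilled to the stack. Save and
	 * restore enough of the stack that a jprobe pre-handler cannot
	 * clobber those stacked arguments.
	 */
	kcb->jprobe_saved_regs = *regs;
	memcpy(kcb->jprobes_stack, (void *)stack_ptr,
	       MIN_STACK_SIZE(stack_ptr));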


+   preempt_enable_no_resched();
+   return 1;
+}
+



Thanks,

James



Thanks,
-dl



Re: [PATCH v12 05/10] arm64: Kprobes with single stepping support

2016-05-17 Thread Masami Hiramatsu
On Thu, 12 May 2016 16:01:54 +0100
James Morse  wrote:

> Hi David, Sandeepa,
> 
> On 27/04/16 19:53, David Long wrote:
> > From: Sandeepa Prabhu 
> 
> > diff --git a/arch/arm64/kernel/kprobes.c b/arch/arm64/kernel/kprobes.c
> > new file mode 100644
> > index 000..dfa1b1f
> > --- /dev/null
> > +++ b/arch/arm64/kernel/kprobes.c
> > @@ -0,0 +1,520 @@
> > +/*
> > + * arch/arm64/kernel/kprobes.c
> > + *
> > + * Kprobes support for ARM64
> > + *
> > + * Copyright (C) 2013 Linaro Limited.
> > + * Author: Sandeepa Prabhu 
> > + *
> > + * This program is free software; you can redistribute it and/or modify
> > + * it under the terms of the GNU General Public License version 2 as
> > + * published by the Free Software Foundation.
> > + *
> > + * This program is distributed in the hope that it will be useful,
> > + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> > + * General Public License for more details.
> > + *
> > + */
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +#include 
> > +
> > +#include "kprobes-arm64.h"
> > +
> > +#define MIN_STACK_SIZE(addr)   min((unsigned long)MAX_STACK_SIZE,  \
> > +   (unsigned long)current_thread_info() + THREAD_START_SP - (addr))
> 
> What if we probe something called on the irq stack?
> This needs the on_irq_stack() checks too, the start/end can be found from the
> per-cpu irq_stack value.
> 
> [ ... ]
> 
> > +int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
> > +{
> > +   struct jprobe *jp = container_of(p, struct jprobe, kp);
> > +   struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> > +   long stack_ptr = kernel_stack_pointer(regs);
> > +
> > +   kcb->jprobe_saved_regs = *regs;
> > +   memcpy(kcb->jprobes_stack, (void *)stack_ptr,
> > +  MIN_STACK_SIZE(stack_ptr));
> 
> I wonder if we need this stack save/restore?
> 
> The comment next to the equivalent code for x86 says:
> > gcc assumes that the callee owns the argument space and could overwrite it,
> > e.g. tailcall optimization. So, to be absolutely safe we also save and
> > restore enough stack bytes to cover the argument area.
> 
> On arm64 the first eight arguments are passed in registers, so we might not need
> this stack copy. (sparc and powerpc work like this too, their versions of this
> function don't copy chunks of the stack).

Hmm, maybe the sparc and powerpc implementations should also be fixed...

> ... then I went looking for functions with >8 arguments...
> 
> Looking at the arm64 defconfig dwarf debug data, there are 71 of these that
> don't get inlined, picking at random:
> > rockchip_clk_register_pll() has 13
> > fib_dump_info() has 11
> > vma_merge() has 10
> > vring_create_virtqueue() has 10
> etc...
> 
> So we do need this stack copying, so that we can probe these functions without
> risking the arguments being modified.
> 
> It may be worth including a comment to the effect that this stack save/restore
> is needed for functions that pass >8 arguments where the pre-handler may change
> these values on the stack.

Indeed, a comment on this code would help us understand the reason why.

Thank you!

> 
> 
> > +   preempt_enable_no_resched();
> > +   return 1;
> > +}
> > +
> 
> 
> Thanks,
> 
> James


-- 
Masami Hiramatsu 


Re: [PATCH v12 05/10] arm64: Kprobes with single stepping support

2016-05-17 Thread Masami Hiramatsu
On Tue, 17 May 2016 16:58:09 +0800
Huang Shijie  wrote:

> On Wed, Apr 27, 2016 at 02:53:00PM -0400, David Long wrote:
> > +
> > +/*
> > + * Interrupts need to be disabled before single-step mode is set, and not
> > + * reenabled until after single-step mode ends.
> > + * Without disabling interrupt on local CPU, there is a chance of
> > + * interrupt occurrence in the period of exception return and  start of
> > + * out-of-line single-step, that result in wrongly single stepping
> > + * into the interrupt handler.
> > + */
> > +static void __kprobes kprobes_save_local_irqflag(struct pt_regs *regs)
> > +{
> > + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> 
> Why not add a parameter to this function to pass in the @kcb?

Good catch; it should use the same kcb as the caller.

> 
> > +
> > + kcb->saved_irqflag = regs->pstate;
> > + regs->pstate |= PSR_I_BIT;
> > +}
> > +
> > +static void __kprobes kprobes_restore_local_irqflag(struct pt_regs *regs)
> > +{
> > + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> ditto.
> 
> > +
> > + if (kcb->saved_irqflag & PSR_I_BIT)
> > + regs->pstate |= PSR_I_BIT;
> > + else
> > + regs->pstate &= ~PSR_I_BIT;
> > +}
> > +
> > +static void __kprobes
> > +set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
> > +{
> > + kcb->ss_ctx.ss_pending = true;
> > + kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
> > +}
> > +
> > +static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
> > +{
> > + kcb->ss_ctx.ss_pending = false;
> > + kcb->ss_ctx.match_addr = 0;
> > +}
> > +
> > +static void __kprobes setup_singlestep(struct kprobe *p,
> > +struct pt_regs *regs,
> > +struct kprobe_ctlblk *kcb, int reenter)
> > +{
> > + unsigned long slot;
> > +
> > + if (reenter) {
> > + save_previous_kprobe(kcb);
> > + set_current_kprobe(p);
> > + kcb->kprobe_status = KPROBE_REENTER;
> > + } else {
> > + kcb->kprobe_status = KPROBE_HIT_SS;
> > + }
> > +
> > + if (p->ainsn.insn) {
> > + /* prepare for single stepping */
> > + slot = (unsigned long)p->ainsn.insn;
> > +
> > + set_ss_context(kcb, slot);  /* mark pending ss */
> > +
> > + if (kcb->kprobe_status == KPROBE_REENTER)
> > + spsr_set_debug_flag(regs, 0);
> > +
> > + /* IRQs and single stepping do not mix well. */
> > + kprobes_save_local_irqflag(regs);
> > + kernel_enable_single_step(regs);
> > + instruction_pointer(regs) = slot;
> > + } else  {
> > + BUG();

You'd better use BUG_ON(!p->ainsn.insn);

> > + }
> > +}
> > +
> > +static int __kprobes reenter_kprobe(struct kprobe *p,
> > + struct pt_regs *regs,
> > + struct kprobe_ctlblk *kcb)
> > +{
> > + switch (kcb->kprobe_status) {
> > + case KPROBE_HIT_SSDONE:
> > + case KPROBE_HIT_ACTIVE:
> > + kprobes_inc_nmissed_count(p);
> > + setup_singlestep(p, regs, kcb, 1);
> > + break;
> > + case KPROBE_HIT_SS:
> > + case KPROBE_REENTER:
> > + pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
> > + dump_kprobe(p);
> > + BUG();
> > + break;
> > + default:
> > + WARN_ON(1);
> > + return 0;
> > + }
> > +
> > + return 1;
> > +}
> > +
> > +static void __kprobes
> > +post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
> > +{
> > + struct kprobe *cur = kprobe_running();
> > +
> > + if (!cur)
> > + return;
> > +
> > + /* return addr restore if non-branching insn */
> > + if (cur->ainsn.restore.type == RESTORE_PC) {
> > + instruction_pointer(regs) = cur->ainsn.restore.addr;
> > + if (!instruction_pointer(regs))
> > + BUG();
> > + }
> > +
> > + /* restore back original saved kprobe variables and continue */
> > + if (kcb->kprobe_status == KPROBE_REENTER) {
> > + restore_previous_kprobe(kcb);
> > + return;
> > + }
> > + /* call post handler */
> > + kcb->kprobe_status = KPROBE_HIT_SSDONE;
> > + if (cur->post_handler)  {
> > + /* post_handler can hit breakpoint and single step
> > +  * again, so we enable D-flag for recursive exception.
> > +  */
> > + cur->post_handler(cur, regs, 0);
> > + }
> > +
> > + reset_current_kprobe();
> > +}
> > +
> > +int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
> > +{
> > + struct kprobe *cur = kprobe_running();
> > + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> > +
> > + switch (kcb->kprobe_status) {
> > + case KPROBE_HIT_SS:
> > + case KPROBE_REENTER:
> > +

Re: [PATCH v12 05/10] arm64: Kprobes with single stepping support

2016-05-17 Thread Huang Shijie
On Wed, Apr 27, 2016 at 02:53:00PM -0400, David Long wrote:
> From: Sandeepa Prabhu 
> +
> +static bool __kprobes aarch64_insn_is_steppable(u32 insn)

Could we add more comments to this function? The comment could explain
which types of instructions are steppable and which are not.

> +{
> + if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) {
> + if (aarch64_insn_is_branch(insn) ||
> + aarch64_insn_is_msr_imm(insn) ||
> + aarch64_insn_is_msr_reg(insn) ||
> + aarch64_insn_is_exception(insn))
> + return false;
> +
> + if (aarch64_insn_is_mrs(insn))
> + return aarch64_insn_extract_system_reg(insn)
> +  != AARCH64_INSN_SPCLREG_DAIF;
> +
> + if (aarch64_insn_is_hint(insn))
> + return aarch64_insn_is_nop(insn);
> +
> + return true;
> + }
> +
> + if (aarch64_insn_uses_literal(insn) ||
> + aarch64_insn_is_exclusive(insn))
> + return false;
> +
> + return true;

Thanks
Huang Shijie



Re: [PATCH v12 05/10] arm64: Kprobes with single stepping support

2016-05-17 Thread Huang Shijie
On Wed, Apr 27, 2016 at 02:53:00PM -0400, David Long wrote:
> +
> +/*
> + * Interrupts need to be disabled before single-step mode is set, and not
> + * reenabled until after single-step mode ends.
> + * Without disabling interrupt on local CPU, there is a chance of
> + * interrupt occurrence in the period of exception return and  start of
> + * out-of-line single-step, that result in wrongly single stepping
> + * into the interrupt handler.
> + */
> +static void __kprobes kprobes_save_local_irqflag(struct pt_regs *regs)
> +{
> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

Why not add a parameter to this function to pass in the @kcb?

> +
> + kcb->saved_irqflag = regs->pstate;
> + regs->pstate |= PSR_I_BIT;
> +}
> +
> +static void __kprobes kprobes_restore_local_irqflag(struct pt_regs *regs)
> +{
> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
ditto.

> +
> + if (kcb->saved_irqflag & PSR_I_BIT)
> + regs->pstate |= PSR_I_BIT;
> + else
> + regs->pstate &= ~PSR_I_BIT;
> +}
> +
> +static void __kprobes
> +set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
> +{
> + kcb->ss_ctx.ss_pending = true;
> + kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
> +}
> +
> +static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
> +{
> + kcb->ss_ctx.ss_pending = false;
> + kcb->ss_ctx.match_addr = 0;
> +}
> +
> +static void __kprobes setup_singlestep(struct kprobe *p,
> +struct pt_regs *regs,
> +struct kprobe_ctlblk *kcb, int reenter)
> +{
> + unsigned long slot;
> +
> + if (reenter) {
> + save_previous_kprobe(kcb);
> + set_current_kprobe(p);
> + kcb->kprobe_status = KPROBE_REENTER;
> + } else {
> + kcb->kprobe_status = KPROBE_HIT_SS;
> + }
> +
> + if (p->ainsn.insn) {
> + /* prepare for single stepping */
> + slot = (unsigned long)p->ainsn.insn;
> +
> + set_ss_context(kcb, slot);  /* mark pending ss */
> +
> + if (kcb->kprobe_status == KPROBE_REENTER)
> + spsr_set_debug_flag(regs, 0);
> +
> + /* IRQs and single stepping do not mix well. */
> + kprobes_save_local_irqflag(regs);
> + kernel_enable_single_step(regs);
> + instruction_pointer(regs) = slot;
> + } else  {
> + BUG();
> + }
> +}
> +
> +static int __kprobes reenter_kprobe(struct kprobe *p,
> + struct pt_regs *regs,
> + struct kprobe_ctlblk *kcb)
> +{
> + switch (kcb->kprobe_status) {
> + case KPROBE_HIT_SSDONE:
> + case KPROBE_HIT_ACTIVE:
> + kprobes_inc_nmissed_count(p);
> + setup_singlestep(p, regs, kcb, 1);
> + break;
> + case KPROBE_HIT_SS:
> + case KPROBE_REENTER:
> + pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
> + dump_kprobe(p);
> + BUG();
> + break;
> + default:
> + WARN_ON(1);
> + return 0;
> + }
> +
> + return 1;
> +}
> +
> +static void __kprobes
> +post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
> +{
> + struct kprobe *cur = kprobe_running();
> +
> + if (!cur)
> + return;
> +
> + /* return addr restore if non-branching insn */
> + if (cur->ainsn.restore.type == RESTORE_PC) {
> + instruction_pointer(regs) = cur->ainsn.restore.addr;
> + if (!instruction_pointer(regs))
> + BUG();
> + }
> +
> + /* restore back original saved kprobe variables and continue */
> + if (kcb->kprobe_status == KPROBE_REENTER) {
> + restore_previous_kprobe(kcb);
> + return;
> + }
> + /* call post handler */
> + kcb->kprobe_status = KPROBE_HIT_SSDONE;
> + if (cur->post_handler)  {
> + /* post_handler can hit breakpoint and single step
> +  * again, so we enable D-flag for recursive exception.
> +  */
> + cur->post_handler(cur, regs, 0);
> + }
> +
> + reset_current_kprobe();
> +}
> +
> +int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
> +{
> + struct kprobe *cur = kprobe_running();
> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> +
> + switch (kcb->kprobe_status) {
> + case KPROBE_HIT_SS:
> + case KPROBE_REENTER:
> + /*
> +  * We are here because the instruction being single
> +  * stepped caused a page fault. We reset the current
> +  * kprobe and the ip points back to the probe address
> +  * and allow the page fault handler to continue as a
> +  * normal page fault.
> +  */
> + instruction_pointer(regs) = (unsigned long)cur->addr;
> + if (!instruction_pointer(regs))
> + BUG();

Re: [PATCH v12 05/10] arm64: Kprobes with single stepping support

2016-05-12 Thread James Morse
Hi David, Sandeepa,

On 27/04/16 19:53, David Long wrote:
> From: Sandeepa Prabhu 

> diff --git a/arch/arm64/kernel/kprobes.c b/arch/arm64/kernel/kprobes.c
> new file mode 100644
> index 000..dfa1b1f
> --- /dev/null
> +++ b/arch/arm64/kernel/kprobes.c
> @@ -0,0 +1,520 @@
> +/*
> + * arch/arm64/kernel/kprobes.c
> + *
> + * Kprobes support for ARM64
> + *
> + * Copyright (C) 2013 Linaro Limited.
> + * Author: Sandeepa Prabhu 
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * General Public License for more details.
> + *
> + */
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +#include "kprobes-arm64.h"
> +
> +#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE,  \
> + (unsigned long)current_thread_info() + THREAD_START_SP - (addr))

What if we probe something called on the irq stack?
This needs the on_irq_stack() checks too, the start/end can be found from the
per-cpu irq_stack value.

[ ... ]

> +int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
> +{
> + struct jprobe *jp = container_of(p, struct jprobe, kp);
> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> + long stack_ptr = kernel_stack_pointer(regs);
> +
> + kcb->jprobe_saved_regs = *regs;
> + memcpy(kcb->jprobes_stack, (void *)stack_ptr,
> +MIN_STACK_SIZE(stack_ptr));

I wonder if we need this stack save/restore?

The comment next to the equivalent code for x86 says:
> gcc assumes that the callee owns the argument space and could overwrite it,
> e.g. tailcall optimization. So, to be absolutely safe we also save and
> restore enough stack bytes to cover the argument area.

On arm64 the first eight arguments are passed in registers, so we might not need
this stack copy. (sparc and powerpc work like this too, their versions of this
function don't copy chunks of the stack).

... then I went looking for functions with >8 arguments...

Looking at the arm64 defconfig dwarf debug data, there are 71 of these that
don't get inlined, picking at random:
> rockchip_clk_register_pll() has 13
> fib_dump_info() has 11
> vma_merge() has 10
> vring_create_virtqueue() has 10
etc...

So we do need this stack copying, so that we can probe these functions without
risking the arguments being modified.

It may be worth including a comment to the effect that this stack save/restore
is needed for functions that pass >8 arguments where the pre-handler may change
these values on the stack.


> + preempt_enable_no_resched();
> + return 1;
> +}
> +


Thanks,

James


Re: [PATCH v12 05/10] arm64: Kprobes with single stepping support

2016-05-12 Thread James Morse
Hi David, Sandeepa,

On 27/04/16 19:53, David Long wrote:
> From: Sandeepa Prabhu 

> diff --git a/arch/arm64/kernel/kprobes.c b/arch/arm64/kernel/kprobes.c
> new file mode 100644
> index 000..dfa1b1f
> --- /dev/null
> +++ b/arch/arm64/kernel/kprobes.c
> @@ -0,0 +1,520 @@
> +/*
> + * arch/arm64/kernel/kprobes.c
> + *
> + * Kprobes support for ARM64
> + *
> + * Copyright (C) 2013 Linaro Limited.
> + * Author: Sandeepa Prabhu 
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + * This program is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * General Public License for more details.
> + *
> + */
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +#include "kprobes-arm64.h"
> +
> +#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE,  \
> + (unsigned long)current_thread_info() + THREAD_START_SP - (addr))

What if we probe something called on the irq stack?
This needs the on_irq_stack() checks too, the start/end can be found from the
per-cpu irq_stack value.

[ ... ]

> +int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
> +{
> + struct jprobe *jp = container_of(p, struct jprobe, kp);
> + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
> + long stack_ptr = kernel_stack_pointer(regs);
> +
> + kcb->jprobe_saved_regs = *regs;
> + memcpy(kcb->jprobes_stack, (void *)stack_ptr,
> +MIN_STACK_SIZE(stack_ptr));

I wonder if we need this stack save/restore?

The comment next to the equivalent code for x86 says:
> gcc assumes that the callee owns the argument space and could overwrite it,
> e.g. tailcall optimization. So, to be absolutely safe we also save and
> restore enough stack bytes to cover the argument area.

On arm64 the first eight arguments are passed in registers, so we might not need
this stack copy. (sparc and powerpc work like this too, their versions of this
function don't copy chunks of the stack).

... then I went looking for functions with >8 arguments...

Looking at the arm64 defconfig dwarf debug data, there are 71 of these that
don't get inlined, picking at random:
> rockchip_clk_register_pll() has 13
> fib_dump_info() has 11
> vma_merge() has 10
> vring_create_virtqueue() has 10
etc...

So we do need this stack copying, so that we can probe these functions without
risking the arguments being modified.

It may be worth including a comment to the effect that this stack save/restore
is needed for functions that pass >8 arguments where the pre-handler may change
these values on the stack.
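
Something like this above the memcpy(), perhaps (my wording, just a sketch):

	/*
	 * On arm64 the first eight arguments are passed in x0-x7 and the
	 * rest go in the caller's argument area on the stack. As on x86,
	 * gcc assumes the callee owns that area and may overwrite it
	 * (e.g. tailcall optimization), so save and restore enough stack
	 * to cover it in case a pre-handler changes the on-stack
	 * arguments of a function taking more than eight parameters.
	 */
	memcpy(kcb->jprobes_stack, (void *)stack_ptr,
	       MIN_STACK_SIZE(stack_ptr));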


> + preempt_enable_no_resched();
> + return 1;
> +}
> +


Thanks,

James


[PATCH v12 05/10] arm64: Kprobes with single stepping support

2016-04-27 Thread David Long
From: Sandeepa Prabhu 

Add support for basic kernel probes (kprobes) and jump probes
(jprobes) for ARM64.

Kprobes utilizes software breakpoint and single step debug
exceptions supported on ARM v8.

A software breakpoint is placed at the probe address to trap the
kernel execution into the kprobe handler.
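
In outline, arming a probe is just swapping the original instruction for
the kprobes BRK via the existing arm64 text-patching helper (a sketch, not
necessarily the exact code in the diff below; error handling omitted):

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	void *addr = p->addr;
	u32 brk = BRK64_OPCODE_KPROBES;

	/* replace the probed instruction with the kprobes BRK */
	aarch64_insn_patch_text(&addr, &brk, 1);
}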

ARM v8 supports enabling single stepping before the break exception
return (ERET), with the next PC held in the exception return address
(ELR_EL1). The
kprobe handler prepares an executable memory slot for out-of-line
execution with a copy of the original instruction being probed, and
enables single stepping. The PC is set to the out-of-line slot address
before the ERET. With this scheme, the instruction is executed with the
exact same register context except for the PC (and DAIF) registers.
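
In outline (an illustrative sketch, not the exact code in this patch):

static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
			     struct kprobe_ctlblk *kcb)
{
	/* resume at the out-of-line copy of the probed instruction */
	regs->pc = (unsigned long)p->ainsn.insn;

	/* arm PSTATE.SS so exactly one instruction runs after ERET */
	kernel_enable_single_step(regs);
}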

The debug mask (PSTATE.D) is enabled only when single stepping a recursive
kprobe, e.g. during kprobe re-entry, so that the probed instruction can be
single stepped within the kprobe handler's exception context.
The recursion depth of a kprobe is always 2: upon probe re-entry, any
further re-entry is prevented by not calling the handlers, and that case
is counted as a missed kprobe.
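
Roughly, reusing the setup_singlestep() sketch above (illustrative only):

	if (kprobe_running()) {
		/*
		 * Second-level hit: still single step the probed
		 * instruction, but skip the handlers and count the
		 * event as a missed kprobe.
		 */
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb);
		return 1;
	}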

Single stepping from the x-o-l slot has a drawback for PC-relative accesses
such as branches and symbolic literal accesses, as the offset from the new PC
(the slot address) is not guaranteed to fit in the immediate value of
the opcode. Such instructions need simulation, so probing them is
rejected.
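
A worked example with hypothetical addresses: ADR and LDR (literal) encode
a signed PC-relative offset with a +/-1MiB range.

	/*
	 * At its home address the instruction
	 *
	 *	ffff000008100000:  adr x0, ffff000008100100  // offset +0x100
	 *
	 * fits easily. Copied to an x-o-l slot at, say, ffff000008af0000,
	 * the required offset becomes roughly -10MiB, far outside the
	 * +/-1MiB range, so the instruction cannot simply be re-encoded
	 * at the slot.
	 */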

Instructions that generate exceptions or change CPU mode are rejected
for probing.

Exclusive load/store instructions are rejected too.  Additionally, the
probed address is checked to see whether it lies inside an exclusive
load/store sequence (code from Pratyush).
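
That check scans backwards from the probe address; roughly (a sketch that
may differ from the patch in detail), using the load_ex/store_ex decoders
added below:

static bool is_probed_address_atomic(kprobe_opcode_t *scan_start,
				     kprobe_opcode_t *scan_end)
{
	while (scan_start > scan_end) {
		/*
		 * An atomic region starts with an exclusive load and ends
		 * with an exclusive store: seeing a store-exclusive first
		 * means any region is already closed, while seeing a
		 * load-exclusive first means the probe sits inside one.
		 */
		if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start)))
			return false;
		if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start)))
			return true;
		scan_start--;
	}

	return false;
}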

System instructions are mostly enabled for stepping, except MSR/MRS
accesses to "DAIF" flags in PSTATE, which are not safe for
probing.

Thanks to Steve Capper and Pratyush Anand for several suggested
changes.

Signed-off-by: Sandeepa Prabhu 
Signed-off-by: David A. Long 
Signed-off-by: Pratyush Anand 
---
 arch/arm64/Kconfig  |   1 +
 arch/arm64/include/asm/debug-monitors.h |   5 +
 arch/arm64/include/asm/insn.h   |   4 +-
 arch/arm64/include/asm/kprobes.h|  60 
 arch/arm64/include/asm/probes.h |  44 +++
 arch/arm64/kernel/Makefile  |   1 +
 arch/arm64/kernel/debug-monitors.c  |  18 +-
 arch/arm64/kernel/kprobes-arm64.c   | 121 
 arch/arm64/kernel/kprobes-arm64.h   |  35 +++
 arch/arm64/kernel/kprobes.c | 520 
 arch/arm64/kernel/vmlinux.lds.S |   1 +
 arch/arm64/mm/fault.c   |  25 ++
 12 files changed, 831 insertions(+), 4 deletions(-)
 create mode 100644 arch/arm64/include/asm/kprobes.h
 create mode 100644 arch/arm64/include/asm/probes.h
 create mode 100644 arch/arm64/kernel/kprobes-arm64.c
 create mode 100644 arch/arm64/kernel/kprobes-arm64.h
 create mode 100644 arch/arm64/kernel/kprobes.c

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8f662fd..222bfb9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -83,6 +83,7 @@ config ARM64
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RCU_TABLE_FREE
select HAVE_SYSCALL_TRACEPOINTS
+   select HAVE_KPROBES
select IOMMU_DMA if IOMMU_SUPPORT
select IRQ_DOMAIN
select IRQ_FORCED_THREADING
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index 2fcb9b7..4b6b3f7 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -66,6 +66,11 @@
 
 #define CACHE_FLUSH_IS_SAFE	1
 
+/* kprobes BRK opcodes with ESR encoding  */
+#define BRK64_ESR_MASK 0xFFFF
+#define BRK64_ESR_KPROBES  0x0004
+#define BRK64_OPCODE_KPROBES   (AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5))
+
 /* AArch32 */
 #define DBG_ESR_EVT_BKPT   0x4
 #define DBG_ESR_EVT_VECC   0x5
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index 72dda48..b9567a1 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -253,6 +253,8 @@ __AARCH64_INSN_FUNCS(ldr_reg,   0x3FE0EC00, 0x38606800)
 __AARCH64_INSN_FUNCS(ldr_lit,  0xBF000000, 0x18000000)
 __AARCH64_INSN_FUNCS(ldrsw_lit,0xFF000000, 0x98000000)
 __AARCH64_INSN_FUNCS(exclusive,0x3F800000, 0x08000000)
+__AARCH64_INSN_FUNCS(load_ex,  0x3F400000, 0x08400000)
+__AARCH64_INSN_FUNCS(store_ex, 0x3F400000, 0x08000000)
 __AARCH64_INSN_FUNCS(stp_post, 0x7FC00000, 0x28800000)
 __AARCH64_INSN_FUNCS(ldp_post, 0x7FC00000, 0x28C00000)
 __AARCH64_INSN_FUNCS(stp_pre,  0x7FC00000, 0x29800000)
@@ -401,7 +403,7 @@ bool aarch32_insn_is_wide(u32 insn);
 #define A32_RT_OFFSET  12
 #define A32_RT2_OFFSET  0
 
-u32 aarch64_extract_system_register(u32 insn);
+u32 aarch64_insn_extract_system_reg(u32 insn);
 u32 aarch32_insn_extract_reg_num(u32 insn, int offset);
 u32 aarch32_insn_mcr_extract_opc2(u32 insn);
 u32