Use BUG_ON() instead of the open-coded if/BUG() sequence. BUG_ON() wraps
its condition in unlikely(), so the check can be optimized at compile time.
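
For context, here is a simplified sketch of the two forms. The macro
definitions below are illustrative approximations of the asm-generic ones,
not the exact include/asm-generic/bug.h text; the point is only that
BUG_ON() adds the unlikely() hint that the open-coded form lacks.

    /*
     * Illustrative stand-ins for the kernel macros (approximate, not the
     * real definitions; BUG() on arm64 actually emits a BRK instruction).
     */
    #define unlikely(x)   __builtin_expect(!!(x), 0)
    #define BUG()         do { __builtin_trap(); } while (0)
    #define BUG_ON(cond)  do { if (unlikely(cond)) BUG(); } while (0)

    unsigned long check_ip(unsigned long ip)
    {
    	/* Open-coded form being removed: no branch-prediction hint. */
    	if (!ip)
    		BUG();

    	/*
    	 * BUG_ON() form: unlikely() marks the branch as cold, so the
    	 * compiler can arrange the code accordingly at compile time.
    	 */
    	BUG_ON(!ip);

    	return ip;
    }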

Signed-off-by: zhouchuangao <zhouchuan...@vivo.com>
---
 arch/arm64/kernel/probes/kprobes.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index 66aac28..ecf0f61 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -264,8 +264,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
                 * normal page fault.
                 */
                instruction_pointer_set(regs, (unsigned long) cur->addr);
-               if (!instruction_pointer(regs))
-                       BUG();
+               BUG_ON(!instruction_pointer(regs));
 
                if (kcb->kprobe_status == KPROBE_REENTER)
                        restore_previous_kprobe(kcb);
-- 
2.7.4
