@@ -541,23 +541,6 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb
 	return 1;
 }
 
-#ifdef KPROBES_CAN_USE_FTRACE
-static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs,
-				      struct kprobe_ctlblk *kcb)
-{
-	/*
-	 * Emulate singlestep (and also recover regs->ip)
-	 * as if there is a 5byte nop
-	 */
-	regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
-	if (unlikely(p->post_handler)) {
-		kcb->kprobe_status = KPROBE_HIT_SSDONE;
-		p->post_handler(p, regs, 0);
-	}
-	__this_cpu_write(current_kprobe, NULL);
-}
-#endif
-
 /*
  * Interrupts are disabled on entry as trap3 is an interrupt gate and they
  * remain disabled throughout this function.
@@ -616,13 +599,8 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 	} else if (kprobe_running()) {
 		p = __this_cpu_read(current_kprobe);
 		if (p->break_handler && p->break_handler(p, regs)) {
-#ifdef KPROBES_CAN_USE_FTRACE
-			if (kprobe_ftrace(p)) {
-				skip_singlestep(p, regs, kcb);
-				return 1;
-			}
-#endif
-			setup_singlestep(p, regs, kcb, 0);
+			if (!skip_singlestep(p, regs, kcb))
+				setup_singlestep(p, regs, kcb, 0);
 			return 1;
 		}
 	} /* else: not a kprobe fault; let the kernel handle it */
@@ -1075,50 +1053,6 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return 0;
 }
 
-#ifdef KPROBES_CAN_USE_FTRACE
-/* Ftrace callback handler for kprobes */
-void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
-				     struct ftrace_ops *ops, struct pt_regs *regs)
-{
-	struct kprobe *p;
-	struct kprobe_ctlblk *kcb;
-	unsigned long flags;
-
-	/* Disable irq for emulating a breakpoint and avoiding preempt */
-	local_irq_save(flags);
-
-	p = get_kprobe((kprobe_opcode_t *)ip);
-	if (unlikely(!p) || kprobe_disabled(p))
-		goto end;
-
-	kcb = get_kprobe_ctlblk();
-	if (kprobe_running()) {
-		kprobes_inc_nmissed_count(p);
-	} else {
-		/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
-		regs->ip = ip + sizeof(kprobe_opcode_t);
-
-		__this_cpu_write(current_kprobe, p);
-		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
-		if (!p->pre_handler || !p->pre_handler(p, regs))
-			skip_singlestep(p, regs, kcb);
-		/*
-		 * If pre_handler returns !0, it sets regs->ip and
-		 * resets current kprobe.
-		 */
-	}
-end:
-	local_irq_restore(flags);
-}
-
-int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
-{
-	p->ainsn.insn = NULL;
-	p->ainsn.boostable = -1;
-	return 0;
-}
-#endif
-
 int __init arch_init_kprobes(void)
 {
 	return arch_init_optprobes();
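
Note on the new call site: the line "if (!skip_singlestep(p, regs, kcb))" only compiles if skip_singlestep() is visible in this file for both configurations and now returns non-zero when it has already emulated the single-step for an ftrace-based probe. These hunks do not show where that declaration lands; the following is a minimal sketch of one plausible shape, assuming a shared arch header with an inline stub for the non-ftrace build (the header name and the extern/static-inline split are assumptions, not part of the patch above):

/* Sketch only: assumed shared header, not taken from the hunks above. */
#include <linux/kprobes.h>	/* struct kprobe, struct kprobe_ctlblk */
#include <asm/ptrace.h>		/* struct pt_regs */

#ifdef KPROBES_CAN_USE_FTRACE
/* Real implementation lives with the ftrace-based kprobe code. */
extern int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
			   struct kprobe_ctlblk *kcb);
#else
/*
 * No ftrace-based kprobes in this configuration: report that nothing was
 * emulated, so the caller falls through to setup_singlestep().
 */
static inline int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
				  struct kprobe_ctlblk *kcb)
{
	return 0;
}
#endif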