@@ -523,57 +523,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 	return ret;
 }
 
-int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
-	struct jprobe *jp = container_of(p, struct jprobe, kp);
-	unsigned long addr;
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-
-	kcb->jprobe_saved_regs = *regs;
-	kcb->jprobe_saved_r15 = regs->regs[15];
-	addr = kcb->jprobe_saved_r15;
-
-	/*
-	 * TBD: As Linus pointed out, gcc assumes that the callee
-	 * owns the argument space and could overwrite it, e.g.
-	 * tailcall optimization. So, to be absolutely safe
-	 * we also save and restore enough stack bytes to cover
-	 * the argument area.
-	 */
-	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
-	       MIN_STACK_SIZE(addr));
-
-	regs->pc = (unsigned long)(jp->entry);
-
-	return 1;
-}
-
-void __kprobes jprobe_return(void)
-{
-	asm volatile ("trapa #0x3a\n\t" "jprobe_return_end:\n\t" "nop\n\t");
-}
-
-int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
-	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
-	unsigned long stack_addr = kcb->jprobe_saved_r15;
-	u8 *addr = (u8 *)regs->pc;
-
-	if ((addr >= (u8 *)jprobe_return) &&
-	    (addr <= (u8 *)jprobe_return_end)) {
-		*regs = kcb->jprobe_saved_regs;
-
-		memcpy((kprobe_opcode_t *)stack_addr, kcb->jprobes_stack,
-		       MIN_STACK_SIZE(stack_addr));
-
-		kcb->kprobe_status = KPROBE_HIT_SS;
-		preempt_enable_no_resched();
-		return 1;
-	}
-
-	return 0;
-}
-
 static struct kprobe trampoline_p = {
 	.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
 	.pre_handler = trampoline_probe_handler