/* kprobes-ftrace.c */
/*
 * Dynamic Ftrace based Kprobes Optimization
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Hitachi Ltd., 2012
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 * IBM Corporation
 */
  22. #include <linux/kprobes.h>
  23. #include <linux/ptrace.h>
  24. #include <linux/hardirq.h>
  25. #include <linux/preempt.h>
  26. #include <linux/ftrace.h>
  27. /*
  28. * This is called from ftrace code after invoking registered handlers to
  29. * disambiguate regs->nip changes done by jprobes and livepatch. We check if
  30. * there is an active jprobe at the provided address (mcount location).
  31. */
  32. int __is_active_jprobe(unsigned long addr)
  33. {
  34. if (!preemptible()) {
  35. struct kprobe *p = raw_cpu_read(current_kprobe);
  36. return (p && (unsigned long)p->addr == addr) ? 1 : 0;
  37. }
  38. return 0;
  39. }
/*
 * Skip single-stepping of the probed (mcount) instruction by emulating it
 * as a nop: advance NIP past the mcount site, run the post handler, and
 * optionally restore a caller-supplied NIP. Returns 1 to tell the caller
 * the single-step was handled. The statement order below is significant.
 */
static nokprobe_inline
int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
		      struct kprobe_ctlblk *kcb, unsigned long orig_nip)
{
	/*
	 * Emulate singlestep (and also recover regs->nip)
	 * as if there is a nop
	 */
	regs->nip = (unsigned long)p->addr + MCOUNT_INSN_SIZE;
	if (unlikely(p->post_handler)) {
		/* Report status as if the single-step completed normally. */
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		p->post_handler(p, regs, 0);
	}
	/* Probe handling is finished on this CPU. */
	__this_cpu_write(current_kprobe, NULL);
	/* Restore the NIP the caller captured before adjusting it, if any. */
	if (orig_nip)
		regs->nip = orig_nip;
	return 1;
}
  58. int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
  59. struct kprobe_ctlblk *kcb)
  60. {
  61. if (kprobe_ftrace(p))
  62. return __skip_singlestep(p, regs, kcb, 0);
  63. else
  64. return 0;
  65. }
  66. NOKPROBE_SYMBOL(skip_singlestep);
/*
 * Ftrace callback handler for kprobes: invoked by ftrace at the probed
 * mcount location with @nip pointing just past the mcount call. Preemption
 * is disabled for the whole handling sequence; note the three distinct exit
 * paths below and which of them re-enable preemption.
 */
void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
			   struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	/* Keep current_kprobe and the per-cpu ctlblk stable. */
	preempt_disable();

	p = get_kprobe((kprobe_opcode_t *)nip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto end;

	kcb = get_kprobe_ctlblk();
	if (kprobe_running()) {
		/* Another probe is already being handled on this CPU. */
		kprobes_inc_nmissed_count(p);
	} else {
		unsigned long orig_nip = regs->nip;
		/*
		 * On powerpc, NIP is *before* this instruction for the
		 * pre handler
		 */
		regs->nip -= MCOUNT_INSN_SIZE;

		__this_cpu_write(current_kprobe, p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
		if (!p->pre_handler || !p->pre_handler(p, regs))
			__skip_singlestep(p, regs, kcb, orig_nip);
		else {
			/*
			 * If pre_handler returns !0, it sets regs->nip and
			 * resets current kprobe. In this case, we should not
			 * re-enable preemption.
			 */
			return;
		}
	}
end:
	preempt_enable_no_resched();
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
  104. int arch_prepare_kprobe_ftrace(struct kprobe *p)
  105. {
  106. p->ainsn.insn = NULL;
  107. p->ainsn.boostable = -1;
  108. return 0;
  109. }