/*
 * patch.c - livepatch patching functions
 *
 * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
 * Copyright (C) 2014 SUSE
 * Copyright (C) 2015 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/livepatch.h>
#include <linux/list.h>
#include <linux/ftrace.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include "patch.h"
#include "transition.h"
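
/*
 * List of 'struct klp_ops': one entry per function address that currently
 * has a livepatch ftrace handler registered on it. When several patches
 * touch the same function, they share one klp_ops and stack their
 * klp_funcs on ops->func_stack, newest first.
 */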
static LIST_HEAD(klp_ops);

struct klp_ops *klp_find_ops(unsigned long old_addr)
{
	struct klp_ops *ops;
	struct klp_func *func;

	list_for_each_entry(ops, &klp_ops, node) {
		func = list_first_entry(&ops->func_stack, struct klp_func,
					stack_node);
		if (func->old_addr == old_addr)
			return ops;
	}

	return NULL;
}
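
/*
 * The ftrace handler that runs on every entry to a patched function.
 * It picks the klp_func to use (normally the newest one on the stack,
 * or an older one while a transition is in progress) and redirects
 * execution to its new_func.
 */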
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;
	int patch_state;

	ops = container_of(fops, struct klp_ops, fops);

	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);

	/*
	 * func should never be NULL because preemption should be disabled here
	 * and unregister_ftrace_function() does the equivalent of a
	 * synchronize_sched() before the func_stack removal.
	 */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	/*
	 * In the enable path, enforce the order of the ops->func_stack and
	 * func->transition reads. The corresponding write barrier is in
	 * __klp_enable_patch().
	 *
	 * (Note that this barrier technically isn't needed in the disable
	 * path. In the rare case where klp_update_patch_state() runs before
	 * this handler, its TIF_PATCH_PENDING read and this func->transition
	 * read need to be ordered. But klp_update_patch_state() already
	 * enforces that.)
	 */
	smp_rmb();

	if (unlikely(func->transition)) {

		/*
		 * Enforce the order of the func->transition and
		 * current->patch_state reads. Otherwise we could read an
		 * out-of-date task state and pick the wrong function. The
		 * corresponding write barrier is in klp_init_transition().
		 */
		smp_rmb();

		patch_state = current->patch_state;

		WARN_ON_ONCE(patch_state == KLP_UNDEFINED);

		if (patch_state == KLP_UNPATCHED) {
			/*
			 * Use the previously patched version of the function.
			 * If no previous patches exist, continue with the
			 * original function.
			 */
			func = list_entry_rcu(func->stack_node.next,
					      struct klp_func, stack_node);

			if (&func->stack_node == &ops->func_stack)
				goto unlock;
		}
	}
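
	/* Redirect execution to the selected new function. */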
	klp_arch_set_pc(regs, (unsigned long)func->new_func);

unlock:
	preempt_enable_notrace();
}
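
/*
 * Example of the stacking behaviour above: if patch A and then patch B
 * both replace the same function, ops->func_stack is B -> A and the
 * handler normally picks B. While B is being reverted, a task still in
 * the KLP_UNPATCHED state walks one entry down the stack and runs A's
 * version instead; with no older patch on the stack, it falls through
 * to the original function.
 */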
/*
 * Convert a function address into the appropriate ftrace location.
 *
 * Usually this is just the address of the function, but on some architectures
 * it's more complicated so allow them to provide a custom behaviour.
 */
#ifndef klp_get_ftrace_location
static unsigned long klp_get_ftrace_location(unsigned long faddr)
{
	return faddr;
}
#endif
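
/*
 * For example, powerpc64 with -mprofile-kernel overrides this, because
 * the ftrace call site is not the very first instruction of the
 * function; its override looks roughly like this (a sketch following
 * arch/powerpc/include/asm/livepatch.h, not verbatim):
 *
 *	static inline unsigned long klp_get_ftrace_location(unsigned long faddr)
 *	{
 *		return ftrace_location_range(faddr, faddr + 16);
 *	}
 */

/*
 * Remove one klp_func from its function stack. If it is the last one,
 * the ftrace handler for that function is unregistered and the klp_ops
 * is freed.
 */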
static void klp_unpatch_func(struct klp_func *func)
{
	struct klp_ops *ops;

	if (WARN_ON(!func->patched))
		return;
	if (WARN_ON(!func->old_addr))
		return;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return;

	if (list_is_singular(&ops->func_stack)) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (WARN_ON(!ftrace_loc))
			return;

		WARN_ON(unregister_ftrace_function(&ops->fops));
		WARN_ON(ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0));

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		list_del_rcu(&func->stack_node);
	}

	func->patched = false;
}
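
/*
 * Make one function live-patched: on first use, register a SAVE_REGS |
 * IPMODIFY ftrace handler at its ftrace location; otherwise just stack
 * the new klp_func on the existing klp_ops.
 */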
static int klp_patch_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->patched))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		unsigned long ftrace_loc;

		ftrace_loc = klp_get_ftrace_location(func->old_addr);
		if (!ftrace_loc) {
			pr_err("failed to find location for function '%s'\n",
			       func->old_name);
			return -EINVAL;
		}

		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, ftrace_loc, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			ftrace_set_filter_ip(&ops->fops, ftrace_loc, 1, 0);
			goto err;
		}

	} else {
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->patched = true;

	return 0;

err:
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
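
/* Revert every patched function in @obj. */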
void klp_unpatch_object(struct klp_object *obj)
{
	struct klp_func *func;

	klp_for_each_func(obj, func)
		if (func->patched)
			klp_unpatch_func(func);

	obj->patched = false;
}
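
/*
 * Patch every function in @obj; on failure, unwind any functions that
 * were already patched.
 */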
int klp_patch_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->patched))
		return -EINVAL;

	klp_for_each_func(obj, func) {
		ret = klp_patch_func(func);
		if (ret) {
			klp_unpatch_object(obj);
			return ret;
		}
	}

	obj->patched = true;

	return 0;
}
void klp_unpatch_objects(struct klp_patch *patch)
{
	struct klp_object *obj;

	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_unpatch_object(obj);
}
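
/*
 * For context, the functions above are internal to the livepatch core;
 * a patch module never calls them directly. A minimal consumer looks
 * roughly like the in-tree sample (a sketch following
 * samples/livepatch/livepatch-sample.c, which patches
 * cmdline_proc_show(); not verbatim):
 *
 *	static int livepatch_cmdline_proc_show(struct seq_file *m, void *v)
 *	{
 *		seq_printf(m, "%s\n", "this has been live patched");
 *		return 0;
 *	}
 *
 *	static struct klp_func funcs[] = {
 *		{
 *			.old_name = "cmdline_proc_show",
 *			.new_func = livepatch_cmdline_proc_show,
 *		}, { }
 *	};
 *
 *	static struct klp_object objs[] = {
 *		{
 *			.funcs = funcs,	(name == NULL means vmlinux)
 *		}, { }
 *	};
 *
 *	static struct klp_patch patch = {
 *		.mod = THIS_MODULE,
 *		.objs = objs,
 *	};
 *
 * The module's init registers and enables the patch, which eventually
 * reaches klp_patch_object() above for each object being patched.
 */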