core.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016
  1. /*
  2. * core.c - Kernel Live Patching Core
  3. *
  4. * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
  5. * Copyright (C) 2014 SUSE
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License
  9. * as published by the Free Software Foundation; either version 2
  10. * of the License, or (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  19. */
  20. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21. #include <linux/module.h>
  22. #include <linux/kernel.h>
  23. #include <linux/mutex.h>
  24. #include <linux/slab.h>
  25. #include <linux/ftrace.h>
  26. #include <linux/list.h>
  27. #include <linux/kallsyms.h>
  28. #include <linux/livepatch.h>
/**
 * struct klp_ops - structure for tracking registered ftrace ops structs
 *
 * A single ftrace_ops is shared between all enabled replacement functions
 * (klp_func structs) which have the same old_addr.  This allows the switch
 * between function versions to happen instantaneously by updating the klp_ops
 * struct's func_stack list.  The winner is the klp_func at the top of the
 * func_stack (front of the list).
 *
 * @node:	node for the global klp_ops list
 * @func_stack:	list head for the stack of klp_func's (active func is on top)
 * @fops:	registered ftrace ops struct
 */
struct klp_ops {
	struct list_head node;
	struct list_head func_stack;
	struct ftrace_ops fops;
};

/*
 * The klp_mutex protects the global lists and state transitions of any
 * structure reachable from them.  References to any structure must be obtained
 * under mutex protection (except in klp_ftrace_handler(), which uses RCU to
 * ensure it gets consistent data).
 */
static DEFINE_MUTEX(klp_mutex);

/* All registered patches, in registration order. */
static LIST_HEAD(klp_patches);

/* All live klp_ops structs, one per currently patched old_addr. */
static LIST_HEAD(klp_ops);

/* Root of the /sys/kernel/livepatch hierarchy; NULL until klp_init(). */
static struct kobject *klp_root_kobj;
  57. static struct klp_ops *klp_find_ops(unsigned long old_addr)
  58. {
  59. struct klp_ops *ops;
  60. struct klp_func *func;
  61. list_for_each_entry(ops, &klp_ops, node) {
  62. func = list_first_entry(&ops->func_stack, struct klp_func,
  63. stack_node);
  64. if (func->old_addr == old_addr)
  65. return ops;
  66. }
  67. return NULL;
  68. }
/* An object targets a module (rather than vmlinux) iff it has a name. */
static bool klp_is_module(struct klp_object *obj)
{
	return obj->name;
}

/* vmlinux is always "loaded"; a module object is loaded once obj->mod is set. */
static bool klp_is_object_loaded(struct klp_object *obj)
{
	return !obj->name || obj->mod;
}
/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	if (!klp_is_module(obj))
		return;

	/* module_mutex guards the kernel's module list during the lookup */
	mutex_lock(&module_mutex);
	/*
	 * We don't need to take a reference on the module here because we have
	 * the klp_mutex, which is also taken by the module notifier.  This
	 * prevents any module from unloading until we release the klp_mutex.
	 */
	obj->mod = find_module(obj->name);
	mutex_unlock(&module_mutex);
}
  91. /* klp_mutex must be held by caller */
  92. static bool klp_is_patch_registered(struct klp_patch *patch)
  93. {
  94. struct klp_patch *mypatch;
  95. list_for_each_entry(mypatch, &klp_patches, list)
  96. if (mypatch == patch)
  97. return true;
  98. return false;
  99. }
/* True once klp_init() has created the sysfs root kobject. */
static bool klp_initialized(void)
{
	return klp_root_kobj;
}

/*
 * Cookie passed to klp_find_callback() while walking kallsyms.
 *
 * @objname:	module to restrict the search to, or NULL for vmlinux
 * @name:	symbol name being looked up
 * @addr:	address of the most recent match
 * @count:	number of matches seen so far
 */
struct klp_find_arg {
	const char *objname;
	const char *name;
	unsigned long addr;
	/*
	 * If count == 0, the symbol was not found. If count == 1, a unique
	 * match was found and addr is set. If count > 1, there is
	 * unresolvable ambiguity among "count" number of symbols with the same
	 * name in the same object.
	 */
	unsigned long count;
};
/*
 * kallsyms_on_each_symbol() callback: record each symbol matching the query
 * in @data (a struct klp_find_arg).  Always returns 0 so the walk continues
 * and duplicate symbols can be detected via args->count.
 */
static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	/* symbol's home (vmlinux vs. some module) must match the query */
	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	/* mod is non-NULL here whenever args->objname is non-NULL (see above) */
	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	/*
	 * args->addr might be overwritten if another match is found
	 * but klp_find_object_symbol() handles this and only returns the
	 * addr if count == 1.
	 */
	args->addr = addr;
	args->count++;

	return 0;
}
/*
 * Resolve @name within @objname (NULL for vmlinux) via kallsyms.  On a
 * unique match, store the address in *addr and return 0; on a missing or
 * ambiguous symbol, log an error, store 0 and return -EINVAL.
 */
static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0
	};

	kallsyms_on_each_symbol(klp_find_callback, &args);

	if (args.count == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1)
		pr_err("unresolvable ambiguity (%lu matches) on symbol '%s' in object '%s'\n",
		       args.count, name, objname);
	else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
/* Cookie for klp_verify_callback(): the vmlinux name/address pair to check. */
struct klp_verify_args {
	const char *name;
	const unsigned long addr;
};

/*
 * kallsyms_on_each_symbol() callback: return 1 (stopping the walk) when a
 * vmlinux symbol matching both the name and address in @data is found.
 */
static int klp_verify_callback(void *data, const char *name,
			       struct module *mod, unsigned long addr)
{
	struct klp_verify_args *args = data;

	/* !mod restricts the match to vmlinux symbols */
	if (!mod &&
	    !strcmp(args->name, name) &&
	    args->addr == addr)
		return 1;

	return 0;
}
/*
 * Verify that vmlinux symbol @name really lives at @addr.  Returns 0 when
 * kallsyms confirms the pair; otherwise logs (likely the patch was built
 * against a different kernel) and returns -EINVAL.
 */
static int klp_verify_vmlinux_symbol(const char *name, unsigned long addr)
{
	struct klp_verify_args args = {
		.name = name,
		.addr = addr,
	};

	if (kallsyms_on_each_symbol(klp_verify_callback, &args))
		return 0;

	pr_err("symbol '%s' not found at specified address 0x%016lx, kernel mismatch?\n",
	       name, addr);
	return -EINVAL;
}
/*
 * Establish func->old_addr for @func in @obj: look it up by name for module
 * objects (or when no address was supplied), otherwise verify the
 * caller-supplied vmlinux address.  Returns 0 or a negative error.
 */
static int klp_find_verify_func_addr(struct klp_object *obj,
				     struct klp_func *func)
{
	int ret;

#if defined(CONFIG_RANDOMIZE_BASE)
	/* KASLR is enabled, disregard old_addr from user */
	func->old_addr = 0;
#endif

	if (!func->old_addr || klp_is_module(obj))
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     &func->old_addr);
	else
		ret = klp_verify_vmlinux_symbol(func->old_name,
						func->old_addr);

	return ret;
}
/*
 * external symbols are located outside the parent object (where the parent
 * object is either vmlinux or the kmod being patched).
 */
static int klp_find_external_symbol(struct module *pmod, const char *name,
				    unsigned long *addr)
{
	const struct kernel_symbol *sym;

	/* first, check if it's an exported symbol */
	/* preemption disabled keeps the symbol stable during find_symbol() */
	preempt_disable();
	sym = find_symbol(name, NULL, NULL, true, true);
	if (sym) {
		*addr = sym->value;
		preempt_enable();
		return 0;
	}
	preempt_enable();

	/* otherwise check if it's in another .o within the patch module */
	return klp_find_object_symbol(pmod->name, name, addr);
}
/*
 * Apply every relocation in @obj->relocs on behalf of patch module @pmod.
 * For vmlinux objects the supplied reloc->val is verified against kallsyms;
 * for module objects it is discovered first (exported symbol, or another .o
 * inside the patch module, or the patched module itself) before
 * klp_write_module_reloc() patches the target location.
 */
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int ret;
	struct klp_reloc *reloc;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	if (WARN_ON(!obj->relocs))
		return -EINVAL;

	/* obj->relocs is a NULL-name-terminated array */
	for (reloc = obj->relocs; reloc->name; reloc++) {
		if (!klp_is_module(obj)) {
			ret = klp_verify_vmlinux_symbol(reloc->name,
							reloc->val);
			if (ret)
				return ret;
		} else {
			/* module, reloc->val needs to be discovered */
			if (reloc->external)
				ret = klp_find_external_symbol(pmod,
							       reloc->name,
							       &reloc->val);
			else
				ret = klp_find_object_symbol(obj->mod->name,
							     reloc->name,
							     &reloc->val);
			if (ret)
				return ret;
		}

		ret = klp_write_module_reloc(pmod, reloc->type, reloc->loc,
					     reloc->val + reloc->addend);
		if (ret) {
			pr_err("relocation failed for symbol '%s' at 0x%016lx (%d)\n",
			       reloc->name, reloc->val, ret);
			return ret;
		}
	}

	return 0;
}
/*
 * Ftrace handler hooked at every patched function's entry.  Redirects
 * execution to the newest replacement by rewriting the saved pc in @regs
 * to point at the klp_func on top of the ops' func_stack.  The stack is
 * read under RCU (writers hold klp_mutex); notrace avoids recursing into
 * ftrace from within the handler.
 */
static void notrace klp_ftrace_handler(unsigned long ip,
				       unsigned long parent_ip,
				       struct ftrace_ops *fops,
				       struct pt_regs *regs)
{
	struct klp_ops *ops;
	struct klp_func *func;

	ops = container_of(fops, struct klp_ops, fops);

	rcu_read_lock();
	func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
				      stack_node);
	/* empty stack here would mean a disable race; bail loudly but safely */
	if (WARN_ON_ONCE(!func))
		goto unlock;

	klp_arch_set_pc(regs, (unsigned long)func->new_func);
unlock:
	rcu_read_unlock();
}
/*
 * Pop @func off its klp_ops func_stack.  If it was the only function on the
 * stack, also unregister and free the shared ftrace_ops.  Caller must hold
 * klp_mutex.  Returns 0 or a negative error.
 */
static int klp_disable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(func->state != KLP_ENABLED))
		return -EINVAL;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (WARN_ON(!ops))
		return -EINVAL;

	if (list_is_singular(&ops->func_stack)) {
		/* last patch for this address: tear down the ftrace ops */
		ret = unregister_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to unregister ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			return ret;
		}

		/* clearing the filter is best-effort; warn but keep going */
		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
		if (ret)
			pr_warn("function unregister succeeded but failed to clear the filter\n");

		list_del_rcu(&func->stack_node);
		list_del(&ops->node);
		kfree(ops);
	} else {
		/* other patches remain stacked; just drop this one */
		list_del_rcu(&func->stack_node);
	}

	func->state = KLP_DISABLED;

	return 0;
}
/*
 * Make @func live.  The first patch for a given old_addr allocates a
 * klp_ops, sets its ftrace filter and registers the handler; subsequent
 * patches simply push onto the existing func_stack, which the handler
 * observes immediately via RCU.  Caller must hold klp_mutex.
 */
static int klp_enable_func(struct klp_func *func)
{
	struct klp_ops *ops;
	int ret;

	if (WARN_ON(!func->old_addr))
		return -EINVAL;

	if (WARN_ON(func->state != KLP_DISABLED))
		return -EINVAL;

	ops = klp_find_ops(func->old_addr);
	if (!ops) {
		ops = kzalloc(sizeof(*ops), GFP_KERNEL);
		if (!ops)
			return -ENOMEM;

		ops->fops.func = klp_ftrace_handler;
		/* IPMODIFY: the handler rewrites regs->ip, so regs are needed */
		ops->fops.flags = FTRACE_OPS_FL_SAVE_REGS |
				  FTRACE_OPS_FL_DYNAMIC |
				  FTRACE_OPS_FL_IPMODIFY;

		list_add(&ops->node, &klp_ops);

		INIT_LIST_HEAD(&ops->func_stack);
		list_add_rcu(&func->stack_node, &ops->func_stack);

		ret = ftrace_set_filter_ip(&ops->fops, func->old_addr, 0, 0);
		if (ret) {
			pr_err("failed to set ftrace filter for function '%s' (%d)\n",
			       func->old_name, ret);
			goto err;
		}

		ret = register_ftrace_function(&ops->fops);
		if (ret) {
			pr_err("failed to register ftrace handler for function '%s' (%d)\n",
			       func->old_name, ret);
			/* undo the filter set just above before freeing ops */
			ftrace_set_filter_ip(&ops->fops, func->old_addr, 1, 0);
			goto err;
		}

	} else {
		/* ops already exists for this address; stack on top of it */
		list_add_rcu(&func->stack_node, &ops->func_stack);
	}

	func->state = KLP_ENABLED;

	return 0;

err:
	/* unwind the partially constructed ops allocated above */
	list_del_rcu(&func->stack_node);
	list_del(&ops->node);
	kfree(ops);
	return ret;
}
  348. static int klp_disable_object(struct klp_object *obj)
  349. {
  350. struct klp_func *func;
  351. int ret;
  352. for (func = obj->funcs; func->old_name; func++) {
  353. if (func->state != KLP_ENABLED)
  354. continue;
  355. ret = klp_disable_func(func);
  356. if (ret)
  357. return ret;
  358. }
  359. obj->state = KLP_DISABLED;
  360. return 0;
  361. }
/*
 * Enable every function of @obj, unwinding already enabled ones on failure.
 * The object must be loaded (vmlinux, or a module that is present).
 */
static int klp_enable_object(struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	if (WARN_ON(obj->state != KLP_DISABLED))
		return -EINVAL;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	for (func = obj->funcs; func->old_name; func++) {
		ret = klp_enable_func(func);
		if (ret)
			goto unregister;
	}
	obj->state = KLP_ENABLED;

	return 0;

unregister:
	/* klp_disable_object() skips funcs that never became enabled */
	WARN_ON(klp_disable_object(obj));
	return ret;
}
/* Disable all enabled objects of @patch.  Caller must hold klp_mutex. */
static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->state == KLP_ENABLED)
		return -EBUSY;

	pr_notice("disabling patch '%s'\n", patch->mod->name);

	for (obj = patch->objs; obj->funcs; obj++) {
		if (obj->state != KLP_ENABLED)
			continue;

		ret = klp_disable_object(obj);
		if (ret)
			return ret;
	}

	patch->state = KLP_DISABLED;

	return 0;
}
  400. /**
  401. * klp_disable_patch() - disables a registered patch
  402. * @patch: The registered, enabled patch to be disabled
  403. *
  404. * Unregisters the patched functions from ftrace.
  405. *
  406. * Return: 0 on success, otherwise error
  407. */
  408. int klp_disable_patch(struct klp_patch *patch)
  409. {
  410. int ret;
  411. mutex_lock(&klp_mutex);
  412. if (!klp_is_patch_registered(patch)) {
  413. ret = -EINVAL;
  414. goto err;
  415. }
  416. if (patch->state == KLP_DISABLED) {
  417. ret = -EINVAL;
  418. goto err;
  419. }
  420. ret = __klp_disable_patch(patch);
  421. err:
  422. mutex_unlock(&klp_mutex);
  423. return ret;
  424. }
  425. EXPORT_SYMBOL_GPL(klp_disable_patch);
/*
 * Enable all currently loaded objects of @patch; objects whose module is
 * not loaded yet are picked up later by the module notifier.  Taints the
 * kernel with TAINT_LIVEPATCH.  Caller must hold klp_mutex.
 */
static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (WARN_ON(patch->state != KLP_DISABLED))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    list_prev_entry(patch, list)->state == KLP_DISABLED)
		return -EBUSY;

	pr_notice_once("tainting kernel with TAINT_LIVEPATCH\n");
	add_taint(TAINT_LIVEPATCH, LOCKDEP_STILL_OK);

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	for (obj = patch->objs; obj->funcs; obj++) {
		klp_find_object_module(obj);

		/* not-yet-loaded modules are handled by the module notifier */
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_enable_object(obj);
		if (ret)
			goto unregister;
	}

	patch->state = KLP_ENABLED;

	return 0;

unregister:
	WARN_ON(__klp_disable_patch(patch));
	return ret;
}
  453. /**
  454. * klp_enable_patch() - enables a registered patch
  455. * @patch: The registered, disabled patch to be enabled
  456. *
  457. * Performs the needed symbol lookups and code relocations,
  458. * then registers the patched functions with ftrace.
  459. *
  460. * Return: 0 on success, otherwise error
  461. */
  462. int klp_enable_patch(struct klp_patch *patch)
  463. {
  464. int ret;
  465. mutex_lock(&klp_mutex);
  466. if (!klp_is_patch_registered(patch)) {
  467. ret = -EINVAL;
  468. goto err;
  469. }
  470. ret = __klp_enable_patch(patch);
  471. err:
  472. mutex_unlock(&klp_mutex);
  473. return ret;
  474. }
  475. EXPORT_SYMBOL_GPL(klp_enable_patch);
  476. /*
  477. * Sysfs Interface
  478. *
  479. * /sys/kernel/livepatch
  480. * /sys/kernel/livepatch/<patch>
  481. * /sys/kernel/livepatch/<patch>/enabled
  482. * /sys/kernel/livepatch/<patch>/<object>
  483. * /sys/kernel/livepatch/<patch>/<object>/<func>
  484. */
  485. static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
  486. const char *buf, size_t count)
  487. {
  488. struct klp_patch *patch;
  489. int ret;
  490. unsigned long val;
  491. ret = kstrtoul(buf, 10, &val);
  492. if (ret)
  493. return -EINVAL;
  494. if (val != KLP_DISABLED && val != KLP_ENABLED)
  495. return -EINVAL;
  496. patch = container_of(kobj, struct klp_patch, kobj);
  497. mutex_lock(&klp_mutex);
  498. if (val == patch->state) {
  499. /* already in requested state */
  500. ret = -EINVAL;
  501. goto err;
  502. }
  503. if (val == KLP_ENABLED) {
  504. ret = __klp_enable_patch(patch);
  505. if (ret)
  506. goto err;
  507. } else {
  508. ret = __klp_disable_patch(patch);
  509. if (ret)
  510. goto err;
  511. }
  512. mutex_unlock(&klp_mutex);
  513. return count;
  514. err:
  515. mutex_unlock(&klp_mutex);
  516. return ret;
  517. }
  518. static ssize_t enabled_show(struct kobject *kobj,
  519. struct kobj_attribute *attr, char *buf)
  520. {
  521. struct klp_patch *patch;
  522. patch = container_of(kobj, struct klp_patch, kobj);
  523. return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->state);
  524. }
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);

/* Default sysfs attributes for every patch kobject. */
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	NULL
};

static void klp_kobj_release_patch(struct kobject *kobj)
{
	/*
	 * Once we have a consistency model we'll need to module_put() the
	 * patch module here.  See klp_register_patch() for more details.
	 */
}

/* kobject type backing struct klp_patch::kobj */
static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};

/* Nothing to free: klp_func structs are owned by the patch module. */
static void klp_kobj_release_func(struct kobject *kobj)
{
}

/* kobject type backing struct klp_func::kobj */
static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};
  549. /*
  550. * Free all functions' kobjects in the array up to some limit. When limit is
  551. * NULL, all kobjects are freed.
  552. */
  553. static void klp_free_funcs_limited(struct klp_object *obj,
  554. struct klp_func *limit)
  555. {
  556. struct klp_func *func;
  557. for (func = obj->funcs; func->old_name && func != limit; func++)
  558. kobject_put(&func->kobj);
  559. }
  560. /* Clean up when a patched object is unloaded */
  561. static void klp_free_object_loaded(struct klp_object *obj)
  562. {
  563. struct klp_func *func;
  564. obj->mod = NULL;
  565. for (func = obj->funcs; func->old_name; func++)
  566. func->old_addr = 0;
  567. }
  568. /*
  569. * Free all objects' kobjects in the array up to some limit. When limit is
  570. * NULL, all kobjects are freed.
  571. */
  572. static void klp_free_objects_limited(struct klp_patch *patch,
  573. struct klp_object *limit)
  574. {
  575. struct klp_object *obj;
  576. for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
  577. klp_free_funcs_limited(obj, NULL);
  578. kobject_put(obj->kobj);
  579. }
  580. }
/*
 * Release every kobject owned by @patch and unlink it from the global
 * patch list (the list_empty check covers patches that failed init before
 * being added to klp_patches).
 */
static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
	kobject_put(&patch->kobj);
}
/* Prepare @func and create its sysfs directory under the object's kobject. */
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	INIT_LIST_HEAD(&func->stack_node);
	func->state = KLP_DISABLED;

	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    obj->kobj, "%s", func->old_name);
}
/* parts of the initialization that is done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	/* apply relocations first; symbol lookup may depend on them */
	if (obj->relocs) {
		ret = klp_write_object_relocations(patch->mod, obj);
		if (ret)
			return ret;
	}

	/* resolve or verify every function's old address */
	for (func = obj->funcs; func->old_name; func++) {
		ret = klp_find_verify_func_addr(obj, func);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * Initialize one object of @patch: create its sysfs directory and all func
 * kobjects; if the object is already loaded, also resolve addresses and
 * apply relocations.  On failure, tears down everything created so far.
 */
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->state = KLP_DISABLED;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	obj->kobj = kobject_create_and_add(name, &patch->kobj);
	if (!obj->kobj)
		return -ENOMEM;

	for (func = obj->funcs; func->old_name; func++) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	/* @func marks the first entry that was NOT initialized */
	klp_free_funcs_limited(obj, func);
	kobject_put(obj->kobj);
	return ret;
}
/*
 * Initialize @patch: create its sysfs kobject, initialize every object, and
 * add it to the global patch list.  On failure, partially created state is
 * unwound (objects up to the failing one, then the patch kobject).
 */
static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->state = KLP_DISABLED;

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret)
		goto unlock;

	for (obj = patch->objs; obj->funcs; obj++) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	/* @obj marks the first object that was NOT initialized */
	klp_free_objects_limited(patch, obj);
	kobject_put(&patch->kobj);
unlock:
	mutex_unlock(&klp_mutex);
	return ret;
}
  669. /**
  670. * klp_unregister_patch() - unregisters a patch
  671. * @patch: Disabled patch to be unregistered
  672. *
  673. * Frees the data structures and removes the sysfs interface.
  674. *
  675. * Return: 0 on success, otherwise error
  676. */
  677. int klp_unregister_patch(struct klp_patch *patch)
  678. {
  679. int ret = 0;
  680. mutex_lock(&klp_mutex);
  681. if (!klp_is_patch_registered(patch)) {
  682. ret = -EINVAL;
  683. goto out;
  684. }
  685. if (patch->state == KLP_ENABLED) {
  686. ret = -EBUSY;
  687. goto out;
  688. }
  689. klp_free_patch(patch);
  690. out:
  691. mutex_unlock(&klp_mutex);
  692. return ret;
  693. }
  694. EXPORT_SYMBOL_GPL(klp_unregister_patch);
/**
 * klp_register_patch() - registers a patch
 * @patch: Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	int ret;

	if (!klp_initialized())
		return -ENODEV;

	if (!patch || !patch->mod)
		return -EINVAL;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.  Right now, we don't allow patch modules to unload since
	 * there is currently no method to determine if a thread is still
	 * running in the patched code contained in the patch module once
	 * the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	ret = klp_init_patch(patch);
	if (ret)
		/* init failed; drop the reference taken above */
		module_put(patch->mod);

	return ret;
}
EXPORT_SYMBOL_GPL(klp_register_patch);
/*
 * Module-coming handler: finish per-object initialization for the freshly
 * loaded module and, if the patch is already enabled, start patching it.
 * Failures are logged but cannot veto the module load.
 */
static void klp_module_notify_coming(struct klp_patch *patch,
				     struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;
	int ret;

	ret = klp_init_object_loaded(patch, obj);
	if (ret)
		goto err;

	/* patch not enabled yet; nothing more to do until it is */
	if (patch->state == KLP_DISABLED)
		return;

	pr_notice("applying patch '%s' to loading module '%s'\n",
		  pmod->name, mod->name);

	ret = klp_enable_object(obj);
	if (!ret)
		return;

err:
	pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
		pmod->name, mod->name, ret);
}
/*
 * Module-going handler: revert the patch on the unloading module (if it was
 * active), then forget the now-stale module pointer and resolved addresses.
 */
static void klp_module_notify_going(struct klp_patch *patch,
				    struct klp_object *obj)
{
	struct module *pmod = patch->mod;
	struct module *mod = obj->mod;
	int ret;

	if (patch->state == KLP_DISABLED)
		goto disabled;

	pr_notice("reverting patch '%s' on unloading module '%s'\n",
		  pmod->name, mod->name);

	ret = klp_disable_object(obj);
	if (ret)
		pr_warn("failed to revert patch '%s' on module '%s' (%d)\n",
			pmod->name, mod->name, ret);

disabled:
	klp_free_object_loaded(obj);
}
/*
 * Module notifier: on MODULE_STATE_COMING/GOING, locate every patch object
 * targeting the module and apply or revert it.  All other module state
 * transitions are ignored.  Always returns 0 (cannot veto the transition).
 */
static int klp_module_notify(struct notifier_block *nb, unsigned long action,
			     void *data)
{
	struct module *mod = data;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (action != MODULE_STATE_COMING && action != MODULE_STATE_GOING)
		return 0;

	mutex_lock(&klp_mutex);

	list_for_each_entry(patch, &klp_patches, list) {
		for (obj = patch->objs; obj->funcs; obj++) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			if (action == MODULE_STATE_COMING) {
				obj->mod = mod;
				klp_module_notify_coming(patch, obj);
			} else /* MODULE_STATE_GOING */
				klp_module_notify_going(patch, obj);

			/* at most one object per patch matches a module name */
			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;
}
static struct notifier_block klp_module_nb = {
	.notifier_call = klp_module_notify,
	.priority = INT_MIN+1, /* called late but before ftrace notifier */
};

/*
 * Subsystem init: check compiler support, register the module notifier, and
 * create the /sys/kernel/livepatch root directory.  The notifier is
 * unregistered again if sysfs setup fails.
 */
static int klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	ret = register_module_notifier(&klp_module_nb);
	if (ret)
		return ret;

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj) {
		ret = -ENOMEM;
		goto unregister;
	}

	return 0;

unregister:
	unregister_module_notifier(&klp_module_nb);
	return ret;
}

module_init(klp_init);