core.c 23 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006
  1. /*
  2. * core.c - Kernel Live Patching Core
  3. *
  4. * Copyright (C) 2014 Seth Jennings <sjenning@redhat.com>
  5. * Copyright (C) 2014 SUSE
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License
  9. * as published by the Free Software Foundation; either version 2
  10. * of the License, or (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, see <http://www.gnu.org/licenses/>.
  19. */
  20. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  21. #include <linux/module.h>
  22. #include <linux/kernel.h>
  23. #include <linux/mutex.h>
  24. #include <linux/slab.h>
  25. #include <linux/list.h>
  26. #include <linux/kallsyms.h>
  27. #include <linux/livepatch.h>
  28. #include <linux/elf.h>
  29. #include <linux/moduleloader.h>
  30. #include <linux/completion.h>
  31. #include <asm/cacheflush.h>
  32. #include "core.h"
  33. #include "patch.h"
  34. #include "transition.h"
/*
 * klp_mutex is a coarse lock which serializes access to klp data. All
 * accesses to klp-related variables and structures must have mutex protection,
 * except within the following functions which carefully avoid the need for it:
 *
 * - klp_ftrace_handler()
 * - klp_update_patch_state()
 */
DEFINE_MUTEX(klp_mutex);

/* All registered patches, in registration (stacking) order. */
static LIST_HEAD(klp_patches);

/* /sys/kernel/livepatch root; created by klp_init(), NULL until then. */
static struct kobject *klp_root_kobj;
  46. static bool klp_is_module(struct klp_object *obj)
  47. {
  48. return obj->name;
  49. }
/* sets obj->mod if object is not vmlinux and module is found */
static void klp_find_object_module(struct klp_object *obj)
{
	struct module *mod;

	/* Nothing to look up for vmlinux objects. */
	if (!klp_is_module(obj))
		return;

	mutex_lock(&module_mutex);
	/*
	 * We do not want to block removal of patched modules and therefore
	 * we do not take a reference here. The patches are removed by
	 * klp_module_going() instead.
	 */
	mod = find_module(obj->name);
	/*
	 * Do not mess work of klp_module_coming() and klp_module_going().
	 * Note that the patch might still be needed before klp_module_going()
	 * is called. Module functions can be called even in the GOING state
	 * until mod->exit() finishes. This is especially important for
	 * patches that modify semantic of the functions.
	 */
	if (mod && mod->klp_alive)
		obj->mod = mod;

	mutex_unlock(&module_mutex);
}
  74. static bool klp_is_patch_registered(struct klp_patch *patch)
  75. {
  76. struct klp_patch *mypatch;
  77. list_for_each_entry(mypatch, &klp_patches, list)
  78. if (mypatch == patch)
  79. return true;
  80. return false;
  81. }
  82. static bool klp_initialized(void)
  83. {
  84. return !!klp_root_kobj;
  85. }
/*
 * Argument/result bundle passed through the kallsyms iterators by
 * klp_find_object_symbol().
 */
struct klp_find_arg {
	const char *objname;	/* module name to match; NULL means vmlinux */
	const char *name;	/* symbol name to look up */
	unsigned long addr;	/* out: address of the last matching symbol */
	unsigned long count;	/* out: number of matches seen so far */
	unsigned long pos;	/* wanted occurrence (sympos); 0 = must be unique */
};
/*
 * Per-symbol callback for (module_)kallsyms_on_each_symbol(). @data is a
 * struct klp_find_arg. Records the address and match count of symbols
 * matching args->name within args->objname (NULL objname == vmlinux).
 *
 * Returns 1 to terminate the walk early, 0 to keep iterating.
 */
static int klp_find_callback(void *data, const char *name,
			     struct module *mod, unsigned long addr)
{
	struct klp_find_arg *args = data;

	/* The symbol's home (module vs. vmlinux) must match the request. */
	if ((mod && !args->objname) || (!mod && args->objname))
		return 0;

	if (strcmp(args->name, name))
		return 0;

	/* mod is non-NULL here whenever args->objname is set (checked above). */
	if (args->objname && strcmp(args->objname, mod->name))
		return 0;

	args->addr = addr;
	args->count++;

	/*
	 * Finish the search when the symbol is found for the desired position
	 * or the position is not defined for a non-unique symbol.
	 */
	if ((args->pos && (args->count == args->pos)) ||
	    (!args->pos && (args->count > 1)))
		return 1;

	return 0;
}
/*
 * Look up occurrence @sympos of symbol @name in object @objname (NULL for
 * vmlinux) via kallsyms and store the address in *@addr.
 *
 * Returns 0 on success. Returns -EINVAL (and zeroes *addr) when the symbol
 * is not found, when sympos == 0 but the symbol is not unique, or when the
 * requested position does not exist.
 */
static int klp_find_object_symbol(const char *objname, const char *name,
				  unsigned long sympos, unsigned long *addr)
{
	struct klp_find_arg args = {
		.objname = objname,
		.name = name,
		.addr = 0,
		.count = 0,
		.pos = sympos,
	};

	/* module_mutex keeps the module list stable during the walk. */
	mutex_lock(&module_mutex);
	if (objname)
		module_kallsyms_on_each_symbol(klp_find_callback, &args);
	else
		kallsyms_on_each_symbol(klp_find_callback, &args);
	mutex_unlock(&module_mutex);

	/*
	 * Ensure an address was found. If sympos is 0, ensure symbol is unique;
	 * otherwise ensure the symbol position count matches sympos.
	 */
	if (args.addr == 0)
		pr_err("symbol '%s' not found in symbol table\n", name);
	else if (args.count > 1 && sympos == 0) {
		pr_err("unresolvable ambiguity for symbol '%s' in object '%s'\n",
		       name, objname);
	} else if (sympos != args.count && sympos > 0) {
		pr_err("symbol position %lu for symbol '%s' in object '%s' not found\n",
		       sympos, name, objname ? objname : "vmlinux");
	} else {
		*addr = args.addr;
		return 0;
	}

	*addr = 0;
	return -EINVAL;
}
/*
 * Resolve every symbol referenced by the klp relocation section @relasec of
 * patch module @pmod. Livepatch symbols encode their target in the name as
 * ".klp.sym.objname.symname,sympos"; each successfully resolved address is
 * written back into the Elf symbol's st_value so that a subsequent
 * apply_relocate_add() can use it.
 *
 * Returns 0 on success, -EINVAL on malformed symbols, or the error from
 * klp_find_object_symbol().
 */
static int klp_resolve_symbols(Elf_Shdr *relasec, struct module *pmod)
{
	int i, cnt, vmlinux, ret;
	char objname[MODULE_NAME_LEN];
	char symname[KSYM_NAME_LEN];
	char *strtab = pmod->core_kallsyms.strtab;
	Elf_Rela *relas;
	Elf_Sym *sym;
	unsigned long sympos, addr;

	/*
	 * Since the field widths for objname and symname in the sscanf()
	 * call are hard-coded and correspond to MODULE_NAME_LEN and
	 * KSYM_NAME_LEN respectively, we must make sure that MODULE_NAME_LEN
	 * and KSYM_NAME_LEN have the values we expect them to have.
	 *
	 * Because the value of MODULE_NAME_LEN can differ among architectures,
	 * we use the smallest/strictest upper bound possible (56, based on
	 * the current definition of MODULE_NAME_LEN) to prevent overflows.
	 */
	BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);

	relas = (Elf_Rela *) relasec->sh_addr;
	/* For each rela in this klp relocation section */
	for (i = 0; i < relasec->sh_size / sizeof(Elf_Rela); i++) {
		sym = pmod->core_kallsyms.symtab + ELF_R_SYM(relas[i].r_info);
		if (sym->st_shndx != SHN_LIVEPATCH) {
			pr_err("symbol %s is not marked as a livepatch symbol\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* Format: .klp.sym.objname.symname,sympos */
		cnt = sscanf(strtab + sym->st_name,
			     ".klp.sym.%55[^.].%127[^,],%lu",
			     objname, symname, &sympos);
		if (cnt != 3) {
			pr_err("symbol %s has an incorrectly formatted name\n",
			       strtab + sym->st_name);
			return -EINVAL;
		}

		/* klp_find_object_symbol() treats a NULL objname as vmlinux */
		vmlinux = !strcmp(objname, "vmlinux");
		ret = klp_find_object_symbol(vmlinux ? NULL : objname,
					     symname, sympos, &addr);
		if (ret)
			return ret;

		sym->st_value = addr;
	}

	return 0;
}
/*
 * Apply all klp relocation sections in patch module @pmod that target
 * @obj. For each ".klp.rela.<objname>.<secname>" section matching @obj,
 * resolve the referenced symbols and then run the arch relocation code.
 *
 * @obj must already be loaded (obj->mod set, or a vmlinux object).
 * Returns 0 on success or the first error encountered.
 */
static int klp_write_object_relocations(struct module *pmod,
					struct klp_object *obj)
{
	int i, cnt, ret = 0;
	const char *objname, *secname;
	char sec_objname[MODULE_NAME_LEN];
	Elf_Shdr *sec;

	if (WARN_ON(!klp_is_object_loaded(obj)))
		return -EINVAL;

	objname = klp_is_module(obj) ? obj->name : "vmlinux";

	/* For each klp relocation section */
	for (i = 1; i < pmod->klp_info->hdr.e_shnum; i++) {
		sec = pmod->klp_info->sechdrs + i;
		secname = pmod->klp_info->secstrings + sec->sh_name;
		if (!(sec->sh_flags & SHF_RELA_LIVEPATCH))
			continue;

		/*
		 * Format: .klp.rela.sec_objname.section_name
		 * See comment in klp_resolve_symbols() for an explanation
		 * of the selected field width value.
		 */
		cnt = sscanf(secname, ".klp.rela.%55[^.]", sec_objname);
		if (cnt != 1) {
			pr_err("section %s has an incorrectly formatted name\n",
			       secname);
			ret = -EINVAL;
			break;
		}

		/* Skip sections aimed at a different object. */
		if (strcmp(objname, sec_objname))
			continue;

		ret = klp_resolve_symbols(sec, pmod);
		if (ret)
			break;

		ret = apply_relocate_add(pmod->klp_info->sechdrs,
					 pmod->core_kallsyms.strtab,
					 pmod->klp_info->symndx, i, pmod);
		if (ret)
			break;
	}

	return ret;
}
/*
 * Start the transition that reverts @patch. Caller must hold klp_mutex.
 * Fails with -EBUSY if another transition is already in progress or if a
 * patch stacked on top of this one is still enabled.
 */
static int __klp_disable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;

	if (WARN_ON(!patch->enabled))
		return -EINVAL;

	if (klp_transition_patch)
		return -EBUSY;

	/* enforce stacking: only the last enabled patch can be disabled */
	if (!list_is_last(&patch->list, &klp_patches) &&
	    list_next_entry(patch, list)->enabled)
		return -EBUSY;

	klp_init_transition(patch, KLP_UNPATCHED);

	/* Run pre-unpatch callbacks on every object that is actually patched. */
	klp_for_each_object(patch, obj)
		if (obj->patched)
			klp_pre_unpatch_callback(obj);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the TIF_PATCH_PENDING writes in
	 * klp_start_transition(). In the rare case where klp_ftrace_handler()
	 * is called shortly after klp_update_patch_state() switches the task,
	 * this ensures the handler sees that func->transition is set.
	 */
	smp_wmb();

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = false;

	return 0;
}
  266. /**
  267. * klp_disable_patch() - disables a registered patch
  268. * @patch: The registered, enabled patch to be disabled
  269. *
  270. * Unregisters the patched functions from ftrace.
  271. *
  272. * Return: 0 on success, otherwise error
  273. */
  274. int klp_disable_patch(struct klp_patch *patch)
  275. {
  276. int ret;
  277. mutex_lock(&klp_mutex);
  278. if (!klp_is_patch_registered(patch)) {
  279. ret = -EINVAL;
  280. goto err;
  281. }
  282. if (!patch->enabled) {
  283. ret = -EINVAL;
  284. goto err;
  285. }
  286. ret = __klp_disable_patch(patch);
  287. err:
  288. mutex_unlock(&klp_mutex);
  289. return ret;
  290. }
  291. EXPORT_SYMBOL_GPL(klp_disable_patch);
/*
 * Start the transition that applies @patch. Caller must hold klp_mutex.
 * Fails with -EBUSY if another transition is running or if an earlier
 * (lower) patch on the stack is disabled; on any later failure the whole
 * transition is cancelled via klp_cancel_transition().
 */
static int __klp_enable_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (klp_transition_patch)
		return -EBUSY;

	if (WARN_ON(patch->enabled))
		return -EINVAL;

	/* enforce stacking: only the first disabled patch can be enabled */
	if (patch->list.prev != &klp_patches &&
	    !list_prev_entry(patch, list)->enabled)
		return -EBUSY;

	/*
	 * A reference is taken on the patch module to prevent it from being
	 * unloaded.
	 *
	 * Note: For immediate (no consistency model) patches we don't allow
	 * patch modules to unload since there is no safe/sane method to
	 * determine if a thread is still running in the patched code contained
	 * in the patch module once the ftrace registration is successful.
	 */
	if (!try_module_get(patch->mod))
		return -ENODEV;

	pr_notice("enabling patch '%s'\n", patch->mod->name);

	klp_init_transition(patch, KLP_PATCHED);

	/*
	 * Enforce the order of the func->transition writes in
	 * klp_init_transition() and the ops->func_stack writes in
	 * klp_patch_object(), so that klp_ftrace_handler() will see the
	 * func->transition updates before the handler is registered and the
	 * new funcs become visible to the handler.
	 */
	smp_wmb();

	klp_for_each_object(patch, obj) {
		/* Objects whose module is not loaded are patched later. */
		if (!klp_is_object_loaded(obj))
			continue;

		ret = klp_pre_patch_callback(obj);
		if (ret) {
			pr_warn("pre-patch callback failed for object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}

		ret = klp_patch_object(obj);
		if (ret) {
			pr_warn("failed to patch object '%s'\n",
				klp_is_module(obj) ? obj->name : "vmlinux");
			goto err;
		}
	}

	klp_start_transition();
	klp_try_complete_transition();
	patch->enabled = true;

	return 0;
err:
	pr_warn("failed to enable patch '%s'\n", patch->mod->name);

	/* klp_cancel_transition() also drops the module reference. */
	klp_cancel_transition();
	return ret;
}
  350. /**
  351. * klp_enable_patch() - enables a registered patch
  352. * @patch: The registered, disabled patch to be enabled
  353. *
  354. * Performs the needed symbol lookups and code relocations,
  355. * then registers the patched functions with ftrace.
  356. *
  357. * Return: 0 on success, otherwise error
  358. */
  359. int klp_enable_patch(struct klp_patch *patch)
  360. {
  361. int ret;
  362. mutex_lock(&klp_mutex);
  363. if (!klp_is_patch_registered(patch)) {
  364. ret = -EINVAL;
  365. goto err;
  366. }
  367. ret = __klp_enable_patch(patch);
  368. err:
  369. mutex_unlock(&klp_mutex);
  370. return ret;
  371. }
  372. EXPORT_SYMBOL_GPL(klp_enable_patch);
/*
 * Sysfs Interface
 *
 * /sys/kernel/livepatch
 * /sys/kernel/livepatch/<patch>
 * /sys/kernel/livepatch/<patch>/enabled
 * /sys/kernel/livepatch/<patch>/transition
 * /sys/kernel/livepatch/<patch>/<object>
 * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
 */

/*
 * Handle writes to /sys/kernel/livepatch/<patch>/enabled: a truthy value
 * enables the patch, a falsy one disables it. Writing the opposite of the
 * current state while the patch is mid-transition reverses the transition
 * instead. Returns @count on success, negative errno otherwise.
 */
static ssize_t enabled_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	struct klp_patch *patch;
	int ret;
	bool enabled;

	ret = kstrtobool(buf, &enabled);
	if (ret)
		return ret;

	patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		/*
		 * Module with the patch could either disappear meanwhile or is
		 * not properly initialized yet.
		 */
		ret = -EINVAL;
		goto err;
	}

	if (patch->enabled == enabled) {
		/* already in requested state */
		ret = -EINVAL;
		goto err;
	}

	if (patch == klp_transition_patch) {
		klp_reverse_transition();
	} else if (enabled) {
		ret = __klp_enable_patch(patch);
		if (ret)
			goto err;
	} else {
		ret = __klp_disable_patch(patch);
		if (ret)
			goto err;
	}

	mutex_unlock(&klp_mutex);

	return count;

err:
	mutex_unlock(&klp_mutex);
	return ret;
}
  424. static ssize_t enabled_show(struct kobject *kobj,
  425. struct kobj_attribute *attr, char *buf)
  426. {
  427. struct klp_patch *patch;
  428. patch = container_of(kobj, struct klp_patch, kobj);
  429. return snprintf(buf, PAGE_SIZE-1, "%d\n", patch->enabled);
  430. }
  431. static ssize_t transition_show(struct kobject *kobj,
  432. struct kobj_attribute *attr, char *buf)
  433. {
  434. struct klp_patch *patch;
  435. patch = container_of(kobj, struct klp_patch, kobj);
  436. return snprintf(buf, PAGE_SIZE-1, "%d\n",
  437. patch == klp_transition_patch);
  438. }
/* Per-patch sysfs attributes: "enabled" is read/write, "transition" read-only. */
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	NULL	/* sentinel for the attribute array */
};
/*
 * Release callback for a patch kobject. Wakes up the thread waiting in
 * klp_init_patch()/klp_unregister_patch() on patch->finish, signalling
 * that the sysfs entry is truly gone and teardown may proceed.
 */
static void klp_kobj_release_patch(struct kobject *kobj)
{
	struct klp_patch *patch;

	patch = container_of(kobj, struct klp_patch, kobj);
	complete(&patch->finish);
}

static struct kobj_type klp_ktype_patch = {
	.release = klp_kobj_release_patch,
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = klp_patch_attrs,
};
/*
 * Release callback for an object kobject. Intentionally empty: klp_object
 * storage is owned by the patch module, not by the kobject.
 */
static void klp_kobj_release_object(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_object = {
	.release = klp_kobj_release_object,
	.sysfs_ops = &kobj_sysfs_ops,
};
/*
 * Release callback for a function kobject. Intentionally empty: klp_func
 * storage is owned by the patch module, not by the kobject.
 */
static void klp_kobj_release_func(struct kobject *kobj)
{
}

static struct kobj_type klp_ktype_func = {
	.release = klp_kobj_release_func,
	.sysfs_ops = &kobj_sysfs_ops,
};
/*
 * Free all functions' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_funcs_limited(struct klp_object *obj,
				   struct klp_func *limit)
{
	struct klp_func *func;

	/* The funcs array is terminated by an entry with a NULL old_name. */
	for (func = obj->funcs; func->old_name && func != limit; func++)
		kobject_put(&func->kobj);
}
/* Clean up when a patched object is unloaded */
static void klp_free_object_loaded(struct klp_object *obj)
{
	struct klp_func *func;

	obj->mod = NULL;

	/* Invalidate addresses that pointed into the departing module. */
	klp_for_each_func(obj, func)
		func->old_addr = 0;
}
/*
 * Free all objects' kobjects in the array up to some limit. When limit is
 * NULL, all kobjects are freed.
 */
static void klp_free_objects_limited(struct klp_patch *patch,
				     struct klp_object *limit)
{
	struct klp_object *obj;

	/* The objs array is terminated by an entry with NULL funcs. */
	for (obj = patch->objs; obj->funcs && obj != limit; obj++) {
		klp_free_funcs_limited(obj, NULL);
		kobject_put(&obj->kobj);
	}
}
/*
 * Release all of @patch's object/function kobjects and unlink the patch
 * from klp_patches (if it was ever added). Caller must hold klp_mutex.
 */
static void klp_free_patch(struct klp_patch *patch)
{
	klp_free_objects_limited(patch, NULL);
	if (!list_empty(&patch->list))
		list_del(&patch->list);
}
/*
 * Initialize one klp_func: reset its runtime state and create its sysfs
 * entry under the owning object. Both old_name and new_func are mandatory.
 *
 * Returns 0 on success or the kobject_init_and_add() error.
 */
static int klp_init_func(struct klp_object *obj, struct klp_func *func)
{
	if (!func->old_name || !func->new_func)
		return -EINVAL;

	INIT_LIST_HEAD(&func->stack_node);
	func->patched = false;
	func->transition = false;

	/* The format for the sysfs directory is <function,sympos> where sympos
	 * is the nth occurrence of this symbol in kallsyms for the patched
	 * object. If the user selects 0 for old_sympos, then 1 will be used
	 * since a unique symbol will be the first occurrence.
	 */
	return kobject_init_and_add(&func->kobj, &klp_ktype_func,
				    &obj->kobj, "%s,%lu", func->old_name,
				    func->old_sympos ? func->old_sympos : 1);
}
/* Arches may override this to finish any remaining arch-specific tasks */
void __weak arch_klp_init_object_loaded(struct klp_patch *patch,
					struct klp_object *obj)
{
	/* Default (weak) implementation: nothing to do. */
}
/* parts of the initialization that is done only when the object is loaded */
static int klp_init_object_loaded(struct klp_patch *patch,
				  struct klp_object *obj)
{
	struct klp_func *func;
	int ret;

	/*
	 * The patch module's text is normally read-only; relocations must be
	 * written with protections dropped, and re-enabled on every exit path.
	 */
	module_disable_ro(patch->mod);
	ret = klp_write_object_relocations(patch->mod, obj);
	if (ret) {
		module_enable_ro(patch->mod, true);
		return ret;
	}

	arch_klp_init_object_loaded(patch, obj);
	module_enable_ro(patch->mod, true);

	/* Resolve each function's old address and both old/new code sizes. */
	klp_for_each_func(obj, func) {
		ret = klp_find_object_symbol(obj->name, func->old_name,
					     func->old_sympos,
					     &func->old_addr);
		if (ret)
			return ret;

		ret = kallsyms_lookup_size_offset(func->old_addr,
						  &func->old_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s'\n",
			       func->old_name);
			return -ENOENT;
		}

		ret = kallsyms_lookup_size_offset((unsigned long)func->new_func,
						  &func->new_size, NULL);
		if (!ret) {
			pr_err("kallsyms size lookup failed for '%s' replacement\n",
			       func->old_name);
			return -ENOENT;
		}
	}

	return 0;
}
/*
 * Initialize one klp_object: locate its target module (if any), create its
 * sysfs directory, initialize every function, and — if the target is
 * already loaded — perform the load-time initialization as well.
 *
 * On failure, kobjects created so far for this object are released.
 */
static int klp_init_object(struct klp_patch *patch, struct klp_object *obj)
{
	struct klp_func *func;
	int ret;
	const char *name;

	if (!obj->funcs)
		return -EINVAL;

	obj->patched = false;
	obj->mod = NULL;

	klp_find_object_module(obj);

	name = klp_is_module(obj) ? obj->name : "vmlinux";
	ret = kobject_init_and_add(&obj->kobj, &klp_ktype_object,
				   &patch->kobj, "%s", name);
	if (ret)
		return ret;

	klp_for_each_func(obj, func) {
		ret = klp_init_func(obj, func);
		if (ret)
			goto free;
	}

	if (klp_is_object_loaded(obj)) {
		ret = klp_init_object_loaded(patch, obj);
		if (ret)
			goto free;
	}

	return 0;

free:
	/* func points at the entry that failed; earlier ones need freeing. */
	klp_free_funcs_limited(obj, func);
	kobject_put(&obj->kobj);
	return ret;
}
/*
 * Initialize @patch: create its sysfs directory, initialize every object,
 * and link the patch onto klp_patches. On failure, already-initialized
 * objects are torn down and the patch kobject is released; the function
 * waits for the kobject release (patch->finish) before returning.
 */
static int klp_init_patch(struct klp_patch *patch)
{
	struct klp_object *obj;
	int ret;

	if (!patch->objs)
		return -EINVAL;

	mutex_lock(&klp_mutex);

	patch->enabled = false;
	init_completion(&patch->finish);

	ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
				   klp_root_kobj, "%s", patch->mod->name);
	if (ret) {
		mutex_unlock(&klp_mutex);
		return ret;
	}

	klp_for_each_object(patch, obj) {
		ret = klp_init_object(patch, obj);
		if (ret)
			goto free;
	}

	list_add_tail(&patch->list, &klp_patches);

	mutex_unlock(&klp_mutex);

	return 0;

free:
	/* obj points at the entry that failed; earlier ones need freeing. */
	klp_free_objects_limited(patch, obj);

	mutex_unlock(&klp_mutex);

	/* klp_kobj_release_patch() signals patch->finish when kobj is gone. */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	return ret;
}
/**
 * klp_unregister_patch() - unregisters a patch
 * @patch:	Disabled patch to be unregistered
 *
 * Frees the data structures and removes the sysfs interface.
 *
 * Return: 0 on success, otherwise error
 */
int klp_unregister_patch(struct klp_patch *patch)
{
	int ret;

	mutex_lock(&klp_mutex);

	if (!klp_is_patch_registered(patch)) {
		ret = -EINVAL;
		goto err;
	}

	/* An enabled patch must be disabled before it can be unregistered. */
	if (patch->enabled) {
		ret = -EBUSY;
		goto err;
	}

	klp_free_patch(patch);

	mutex_unlock(&klp_mutex);

	/* Drop the sysfs entry and wait for its release callback to fire. */
	kobject_put(&patch->kobj);
	wait_for_completion(&patch->finish);

	return 0;
err:
	mutex_unlock(&klp_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(klp_unregister_patch);
/**
 * klp_register_patch() - registers a patch
 * @patch:	Patch to be registered
 *
 * Initializes the data structure associated with the patch and
 * creates the sysfs interface.
 *
 * There is no need to take the reference on the patch module here. It is done
 * later when the patch is enabled.
 *
 * Return: 0 on success, otherwise error
 */
int klp_register_patch(struct klp_patch *patch)
{
	if (!patch || !patch->mod)
		return -EINVAL;

	/* The patch module must carry the livepatch ELF marker. */
	if (!is_livepatch_module(patch->mod)) {
		pr_err("module %s is not marked as a livepatch module\n",
		       patch->mod->name);
		return -EINVAL;
	}

	/* klp_init() must have created the sysfs root first. */
	if (!klp_initialized())
		return -ENODEV;

	/*
	 * Architectures without reliable stack traces have to set
	 * patch->immediate because there's currently no way to patch kthreads
	 * with the consistency model.
	 */
	if (!klp_have_reliable_stack() && !patch->immediate) {
		pr_err("This architecture doesn't have support for the livepatch consistency model.\n");
		return -ENOSYS;
	}

	return klp_init_patch(patch);
}
EXPORT_SYMBOL_GPL(klp_register_patch);
/*
 * Remove parts of patches that touch a given kernel module. The list of
 * patches processed might be limited. When limit is NULL, all patches
 * will be handled.
 *
 * Caller must hold klp_mutex. Patches that are enabled or in transition
 * have the module unpatched (with callbacks); all matching objects have
 * their load-time state cleared.
 */
static void klp_cleanup_module_patches_limited(struct module *mod,
					       struct klp_patch *limit)
{
	struct klp_patch *patch;
	struct klp_object *obj;

	list_for_each_entry(patch, &klp_patches, list) {
		if (patch == limit)
			break;

		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			/*
			 * Only unpatch the module if the patch is enabled or
			 * is in transition.
			 */
			if (patch->enabled || patch == klp_transition_patch) {

				/* The transition's own callbacks run elsewhere. */
				if (patch != klp_transition_patch)
					klp_pre_unpatch_callback(obj);

				pr_notice("reverting patch '%s' on unloading module '%s'\n",
					  patch->mod->name, obj->mod->name);
				klp_unpatch_object(obj);

				klp_post_unpatch_callback(obj);
			}

			klp_free_object_loaded(obj);
			/* At most one object per patch can match this module. */
			break;
		}
	}
}
/*
 * Module loader hook, called while @mod is in the COMING state. Marks the
 * module klp_alive and, for every registered patch with an object matching
 * this module, performs the load-time initialization and — if the patch is
 * enabled or in transition — applies it immediately.
 *
 * Returns 0 on success; a nonzero return makes the loader refuse @mod.
 */
int klp_module_coming(struct module *mod)
{
	int ret;
	struct klp_patch *patch;
	struct klp_object *obj;

	if (WARN_ON(mod->state != MODULE_STATE_COMING))
		return -EINVAL;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_coming()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = true;

	list_for_each_entry(patch, &klp_patches, list) {
		klp_for_each_object(patch, obj) {
			if (!klp_is_module(obj) || strcmp(obj->name, mod->name))
				continue;

			obj->mod = mod;

			ret = klp_init_object_loaded(patch, obj);
			if (ret) {
				pr_warn("failed to initialize patch '%s' for module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);
				goto err;
			}

			/*
			 * Only patch the module if the patch is enabled or is
			 * in transition.
			 */
			if (!patch->enabled && patch != klp_transition_patch)
				break;

			pr_notice("applying patch '%s' to loading module '%s'\n",
				  patch->mod->name, obj->mod->name);

			ret = klp_pre_patch_callback(obj);
			if (ret) {
				pr_warn("pre-patch callback failed for object '%s'\n",
					obj->name);
				goto err;
			}

			ret = klp_patch_object(obj);
			if (ret) {
				pr_warn("failed to apply patch '%s' to module '%s' (%d)\n",
					patch->mod->name, obj->mod->name, ret);

				klp_post_unpatch_callback(obj);
				goto err;
			}

			if (patch != klp_transition_patch)
				klp_post_patch_callback(obj);

			/* At most one object per patch can match this module. */
			break;
		}
	}

	mutex_unlock(&klp_mutex);

	return 0;

err:
	/*
	 * If a patch is unsuccessfully applied, return
	 * error to the module loader.
	 */
	pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n",
		patch->mod->name, obj->mod->name, obj->mod->name);
	mod->klp_alive = false;
	/* Revert everything done for this module by patches before @patch. */
	klp_cleanup_module_patches_limited(mod, patch);
	mutex_unlock(&klp_mutex);

	return ret;
}
/*
 * Module loader hook, called when @mod is GOING (or COMING, after a failed
 * load). Clears klp_alive and reverts/cleans up every patch that touches
 * this module.
 */
void klp_module_going(struct module *mod)
{
	if (WARN_ON(mod->state != MODULE_STATE_GOING &&
		    mod->state != MODULE_STATE_COMING))
		return;

	mutex_lock(&klp_mutex);
	/*
	 * Each module has to know that klp_module_going()
	 * has been called. We never know what module will
	 * get patched by a new patch.
	 */
	mod->klp_alive = false;

	klp_cleanup_module_patches_limited(mod, NULL);

	mutex_unlock(&klp_mutex);
}
/*
 * Subsystem init: verify the compiler is usable for livepatching and
 * create the /sys/kernel/livepatch root kobject that klp_initialized()
 * tests for.
 */
static int __init klp_init(void)
{
	int ret;

	ret = klp_check_compiler_support();
	if (ret) {
		pr_info("Your compiler is too old; turning off.\n");
		return -EINVAL;
	}

	klp_root_kobj = kobject_create_and_add("livepatch", kernel_kobj);
	if (!klp_root_kobj)
		return -ENOMEM;

	return 0;
}

module_init(klp_init);