transition.c

/*
 * transition.c - Kernel Live Patching transition functions
 *
 * Copyright (C) 2015-2016 Josh Poimboeuf <jpoimboe@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/stacktrace.h>
#include "core.h"
#include "patch.h"
#include "transition.h"
#include "../sched/sched.h"

#define MAX_STACK_ENTRIES	100
#define STACK_ERR_BUF_SIZE	128
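
/*
 * Transition state shared across this file: the patch currently being
 * applied or reverted, the patch state all tasks are converging to, and
 * whether the administrator forced the transition to completion.
 */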
struct klp_patch *klp_transition_patch;
static int klp_target_state = KLP_UNDEFINED;
static bool klp_forced = false;

/*
 * This work can be performed periodically to finish patching or unpatching any
 * "straggler" tasks which failed to transition in the first attempt.
 */
static void klp_transition_work_fn(struct work_struct *work)
{
	mutex_lock(&klp_mutex);

	if (klp_transition_patch)
		klp_try_complete_transition();

	mutex_unlock(&klp_mutex);
}
static DECLARE_DELAYED_WORK(klp_transition_work, klp_transition_work_fn);

/*
 * This function is just a stub to implement a hard force
 * of synchronize_sched().  This requires synchronizing
 * tasks even in userspace and idle.
 */
static void klp_sync(struct work_struct *work)
{
}

/*
 * We allow patching even functions where RCU is not watching,
 * e.g. before user_exit().  We cannot rely on the RCU infrastructure
 * to do the synchronization.  Instead we hard force the sched synchronization.
 *
 * This approach allows using RCU functions for manipulating func_stack
 * safely.
 */
static void klp_synchronize_transition(void)
{
	schedule_on_each_cpu(klp_sync);
}

/*
 * The transition to the target patch state is complete.  Clean up the data
 * structures.
 */
static void klp_complete_transition(void)
{
	struct klp_object *obj;
	struct klp_func *func;
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_debug("'%s': completing %s transition\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	if (klp_target_state == KLP_UNPATCHED) {
		/*
		 * All tasks have transitioned to KLP_UNPATCHED so we can now
		 * remove the new functions from the func_stack.
		 */
		klp_unpatch_objects(klp_transition_patch);

		/*
		 * Make sure klp_ftrace_handler() can no longer see functions
		 * from this patch on the ops->func_stack.  Otherwise, after
		 * func->transition gets cleared, the handler may choose a
		 * removed function.
		 */
		klp_synchronize_transition();
	}

	klp_for_each_object(klp_transition_patch, obj)
		klp_for_each_func(obj, func)
			func->transition = false;

	/* Prevent klp_ftrace_handler() from seeing KLP_UNDEFINED state */
	if (klp_target_state == KLP_PATCHED)
		klp_synchronize_transition();

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(test_tsk_thread_flag(task, TIF_PATCH_PENDING));
		task->patch_state = KLP_UNDEFINED;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!klp_is_object_loaded(obj))
			continue;
		if (klp_target_state == KLP_PATCHED)
			klp_post_patch_callback(obj);
		else if (klp_target_state == KLP_UNPATCHED)
			klp_post_unpatch_callback(obj);
	}

	pr_notice("'%s': %s complete\n", klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * klp_forced set implies unbounded increase of module's ref count if
	 * the module is disabled/enabled in a loop.
	 */
	if (!klp_forced && klp_target_state == KLP_UNPATCHED)
		module_put(klp_transition_patch->mod);

	klp_target_state = KLP_UNDEFINED;
	klp_transition_patch = NULL;
}

/*
 * This is called in the error path, to cancel a transition before it has
 * started, i.e. klp_init_transition() has been called but
 * klp_start_transition() hasn't.  If the transition *has* been started,
 * klp_reverse_transition() should be used instead.
 */
void klp_cancel_transition(void)
{
	if (WARN_ON_ONCE(klp_target_state != KLP_PATCHED))
		return;

	pr_debug("'%s': canceling patching transition, going to unpatch\n",
		 klp_transition_patch->mod->name);

	klp_target_state = KLP_UNPATCHED;
	klp_complete_transition();
}

/*
 * Switch the patched state of the task to the set of functions in the target
 * patch state.
 *
 * NOTE: If task is not 'current', the caller must ensure the task is inactive.
 * Otherwise klp_ftrace_handler() might read the wrong 'patch_state' value.
 */
void klp_update_patch_state(struct task_struct *task)
{
	/*
	 * A variant of synchronize_sched() is used to allow patching functions
	 * where RCU is not watching, see klp_synchronize_transition().
	 */
	preempt_disable_notrace();

	/*
	 * This test_and_clear_tsk_thread_flag() call also serves as a read
	 * barrier (smp_rmb) for two cases:
	 *
	 * 1) Enforce the order of the TIF_PATCH_PENDING read and the
	 *    klp_target_state read.  The corresponding write barrier is in
	 *    klp_init_transition().
	 *
	 * 2) Enforce the order of the TIF_PATCH_PENDING read and a future read
	 *    of func->transition, if klp_ftrace_handler() is called later on
	 *    the same CPU.  See __klp_disable_patch().
	 */
	if (test_and_clear_tsk_thread_flag(task, TIF_PATCH_PENDING))
		task->patch_state = READ_ONCE(klp_target_state);

	preempt_enable_notrace();
}

/*
 * Determine whether the given stack trace includes any references to a
 * to-be-patched or to-be-unpatched function.
 */
static int klp_check_stack_func(struct klp_func *func,
				struct stack_trace *trace)
{
	unsigned long func_addr, func_size, address;
	struct klp_ops *ops;
	int i;

	for (i = 0; i < trace->nr_entries; i++) {
		address = trace->entries[i];

		if (klp_target_state == KLP_UNPATCHED) {
			/*
			 * Check for the to-be-unpatched function
			 * (the func itself).
			 */
			func_addr = (unsigned long)func->new_func;
			func_size = func->new_size;
		} else {
			/*
			 * Check for the to-be-patched function
			 * (the previous func).
			 */
			ops = klp_find_ops(func->old_addr);

			if (list_is_singular(&ops->func_stack)) {
				/* original function */
				func_addr = func->old_addr;
				func_size = func->old_size;
			} else {
				/* previously patched function */
				struct klp_func *prev;

				prev = list_next_entry(func, stack_node);
				func_addr = (unsigned long)prev->new_func;
				func_size = prev->new_size;
			}
		}

		if (address >= func_addr && address < func_addr + func_size)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Determine whether it's safe to transition the task to the target patch state
 * by looking for any to-be-patched or to-be-unpatched functions on its stack.
 */
static int klp_check_stack(struct task_struct *task, char *err_buf)
{
	static unsigned long entries[MAX_STACK_ENTRIES];
	struct stack_trace trace;
	struct klp_object *obj;
	struct klp_func *func;
	int ret;

	trace.skip = 0;
	trace.nr_entries = 0;
	trace.max_entries = MAX_STACK_ENTRIES;
	trace.entries = entries;
	ret = save_stack_trace_tsk_reliable(task, &trace);
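	/*
	 * -ENOSYS here would mean the arch has no reliable stacktrace support,
	 * which klp_have_reliable_stack() should already have ruled out.
	 */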
	WARN_ON_ONCE(ret == -ENOSYS);
	if (ret) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d has an unreliable stack\n",
			 __func__, task->comm, task->pid);
		return ret;
	}

	klp_for_each_object(klp_transition_patch, obj) {
		if (!obj->patched)
			continue;

		klp_for_each_func(obj, func) {
			ret = klp_check_stack_func(func, &trace);
			if (ret) {
				snprintf(err_buf, STACK_ERR_BUF_SIZE,
					 "%s: %s:%d is sleeping on function %s\n",
					 __func__, task->comm, task->pid,
					 func->old_name);
				return ret;
			}
		}
	}

	return 0;
}

/*
 * Try to safely switch a task to the target patch state.  If it's currently
 * running, or it's sleeping on a to-be-patched or to-be-unpatched function, or
 * if the stack is unreliable, return false.
 */
static bool klp_try_switch_task(struct task_struct *task)
{
	struct rq *rq;
	struct rq_flags flags;
	int ret;
	bool success = false;
	char err_buf[STACK_ERR_BUF_SIZE];

	err_buf[0] = '\0';

	/* check if this task has already switched over */
	if (task->patch_state == klp_target_state)
		return true;

	/*
	 * For arches which don't have reliable stack traces, we have to rely
	 * on other methods (e.g., switching tasks at kernel exit).
	 */
	if (!klp_have_reliable_stack())
		return false;

	/*
	 * Now try to check the stack for any to-be-patched or to-be-unpatched
	 * functions.  If all goes well, switch the task to the target patch
	 * state.
	 */
	rq = task_rq_lock(task, &flags);

	if (task_running(rq, task) && task != current) {
		snprintf(err_buf, STACK_ERR_BUF_SIZE,
			 "%s: %s:%d is running\n", __func__, task->comm,
			 task->pid);
		goto done;
	}

	ret = klp_check_stack(task, err_buf);
	if (ret)
		goto done;

	success = true;

	clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	task->patch_state = klp_target_state;

done:
	task_rq_unlock(rq, task, &flags);

	/*
	 * Due to console deadlock issues, pr_debug() can't be used while
	 * holding the task rq lock.  Instead we have to use a temporary buffer
	 * and print the debug message after releasing the lock.
	 */
	if (err_buf[0] != '\0')
		pr_debug("%s", err_buf);

	return success;
}

/*
 * Try to switch all remaining tasks to the target patch state by walking the
 * stacks of sleeping tasks and looking for any to-be-patched or
 * to-be-unpatched functions.  If such functions are found, the task can't be
 * switched yet.
 *
 * If any tasks are still stuck in the initial patch state, schedule a retry.
 */
void klp_try_complete_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;
	bool complete = true;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	/*
	 * Try to switch the tasks to the target patch state by walking their
	 * stacks and looking for any to-be-patched or to-be-unpatched
	 * functions.  If such functions are found on a stack, or if the stack
	 * is deemed unreliable, the task can't be switched yet.
	 *
	 * Usually this will transition most (or all) of the tasks on a system
	 * unless the patch includes changes to a very common function.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (!klp_try_switch_task(task))
			complete = false;
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	get_online_cpus();
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (cpu_online(cpu)) {
			if (!klp_try_switch_task(task))
				complete = false;
		} else if (task->patch_state != klp_target_state) {
			/* offline idle tasks can be switched immediately */
			clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
			task->patch_state = klp_target_state;
		}
	}
	put_online_cpus();

	if (!complete) {
		/*
		 * Some tasks weren't able to be switched over.  Try again
		 * later and/or wait for other methods like kernel exit
		 * switching.
		 */
		schedule_delayed_work(&klp_transition_work,
				      round_jiffies_relative(HZ));
		return;
	}

	/* we're done, now cleanup the data structures */
	klp_complete_transition();
}

/*
 * Start the transition to the specified target patch state so tasks can begin
 * switching to it.
 */
void klp_start_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	WARN_ON_ONCE(klp_target_state == KLP_UNDEFINED);

	pr_notice("'%s': starting %s transition\n",
		  klp_transition_patch->mod->name,
		  klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Mark all normal tasks as needing a patch state update.  They'll
	 * switch either in klp_try_complete_transition() or as they exit the
	 * kernel.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	/*
	 * Mark all idle tasks as needing a patch state update.  They'll switch
	 * either in klp_try_complete_transition() or at the idle loop switch
	 * point.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		if (task->patch_state != klp_target_state)
			set_tsk_thread_flag(task, TIF_PATCH_PENDING);
	}
}

/*
 * Initialize the global target patch state and all tasks to the initial patch
 * state, and initialize all function transition states to true in preparation
 * for patching or unpatching.
 */
void klp_init_transition(struct klp_patch *patch, int state)
{
	struct task_struct *g, *task;
	unsigned int cpu;
	struct klp_object *obj;
	struct klp_func *func;
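	/* Tasks start out in the opposite of the target patch state. */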
	int initial_state = !state;

	WARN_ON_ONCE(klp_target_state != KLP_UNDEFINED);

	klp_transition_patch = patch;

	/*
	 * Set the global target patch state which tasks will switch to.  This
	 * has no effect until the TIF_PATCH_PENDING flags get set later.
	 */
	klp_target_state = state;

	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching" : "unpatching");

	/*
	 * Initialize all tasks to the initial patch state to prepare them for
	 * switching to the target state.
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}
	read_unlock(&tasklist_lock);

	/*
	 * Ditto for the idle "swapper" tasks.
	 */
	for_each_possible_cpu(cpu) {
		task = idle_task(cpu);
		WARN_ON_ONCE(task->patch_state != KLP_UNDEFINED);
		task->patch_state = initial_state;
	}

	/*
	 * Enforce the order of the task->patch_state initializations and the
	 * func->transition updates to ensure that klp_ftrace_handler() doesn't
	 * see a func in transition with a task->patch_state of KLP_UNDEFINED.
	 *
	 * Also enforce the order of the klp_target_state write and future
	 * TIF_PATCH_PENDING writes to ensure klp_update_patch_state() doesn't
	 * set a task->patch_state to KLP_UNDEFINED.
	 */
	smp_wmb();

	/*
	 * Set the func transition states so klp_ftrace_handler() will know to
	 * switch to the transition logic.
	 *
	 * When patching, the funcs aren't yet in the func_stack and will be
	 * made visible to the ftrace handler shortly by the calls to
	 * klp_patch_object().
	 *
	 * When unpatching, the funcs are already in the func_stack and so are
	 * already visible to the ftrace handler.
	 */
	klp_for_each_object(patch, obj)
		klp_for_each_func(obj, func)
			func->transition = true;
}

/*
 * This function can be called in the middle of an existing transition to
 * reverse the direction of the target patch state.  This can be done to
 * effectively cancel an existing enable or disable operation if there are any
 * tasks which are stuck in the initial patch state.
 */
void klp_reverse_transition(void)
{
	unsigned int cpu;
	struct task_struct *g, *task;

	pr_debug("'%s': reversing transition from %s\n",
		 klp_transition_patch->mod->name,
		 klp_target_state == KLP_PATCHED ? "patching to unpatching" :
						   "unpatching to patching");

	klp_transition_patch->enabled = !klp_transition_patch->enabled;

	klp_target_state = !klp_target_state;

	/*
	 * Clear all TIF_PATCH_PENDING flags to prevent races caused by
	 * klp_update_patch_state() running in parallel with
	 * klp_start_transition().
	 */
	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		clear_tsk_thread_flag(task, TIF_PATCH_PENDING);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		clear_tsk_thread_flag(idle_task(cpu), TIF_PATCH_PENDING);

	/* Let any remaining calls to klp_update_patch_state() complete */
	klp_synchronize_transition();

	klp_start_transition();
}

/* Called from copy_process() during fork */
void klp_copy_process(struct task_struct *child)
{
	child->patch_state = current->patch_state;

	/* TIF_PATCH_PENDING gets copied in setup_thread_stack() */
}

/*
 * Sends a fake signal to all non-kthread tasks with TIF_PATCH_PENDING set.
 * Kthreads with TIF_PATCH_PENDING set are woken up.  Currently only the
 * administrator can request this action.
 */
void klp_send_signals(void)
{
	struct task_struct *g, *task;

	pr_notice("signaling remaining tasks\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task) {
		if (!klp_patch_pending(task))
			continue;

		/*
		 * There is a small race here.  We could see TIF_PATCH_PENDING
		 * set and decide to wake up a kthread or send a fake signal.
		 * Meanwhile the task could migrate itself and the action
		 * would be meaningless.  It is not serious though.
		 */
		if (task->flags & PF_KTHREAD) {
			/*
			 * Wake up a kthread which sleeps interruptibly and
			 * still has not been migrated.
			 */
			wake_up_state(task, TASK_INTERRUPTIBLE);
		} else {
			/*
			 * Send fake signal to all non-kthread tasks which are
			 * still not migrated.
			 */
			spin_lock_irq(&task->sighand->siglock);
			signal_wake_up(task, 0);
			spin_unlock_irq(&task->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);
}

/*
 * Drop TIF_PATCH_PENDING of all tasks on admin's request.  This forces an
 * existing transition to finish.
 *
 * NOTE: klp_update_patch_state(task) requires the task to be inactive or
 * 'current'.  This is not the case here and the consistency model could be
 * broken.  The administrator, who is the only one allowed to execute
 * klp_force_transition(), has to be aware of this.
 */
void klp_force_transition(void)
{
	struct task_struct *g, *task;
	unsigned int cpu;

	pr_warn("forcing remaining tasks to the patched state\n");

	read_lock(&tasklist_lock);
	for_each_process_thread(g, task)
		klp_update_patch_state(task);
	read_unlock(&tasklist_lock);

	for_each_possible_cpu(cpu)
		klp_update_patch_state(idle_task(cpu));
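
	/*
	 * Remember that the transition was forced: klp_complete_transition()
	 * then skips the module_put(), so the patch module stays pinned.
	 */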
	klp_forced = true;
}