update.c

/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);
static int rcu_normal_after_boot;
module_param(rcu_normal_after_boot, int, 0);
#endif /* #ifndef CONFIG_TINY_RCU */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an
 * RCU-sched read-side critical section. In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise. Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section. This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view
 * (i.e., we are in the section between rcu_idle_enter() and
 * rcu_idle_exit()) then rcu_read_lock_held() returns false even if the CPU
 * did an rcu_read_lock(). The reason for this is that RCU ignores CPUs
 * that are in such a section, considering these as in extended quiescent
 * state, so such a CPU is effectively never in an RCU read-side critical
 * section regardless of what RCU primitives it invokes. This state of
 * affairs is required --- we need to keep an RCU-free window in idle
 * where the CPU may possibly enter into low power mode. This way we can
 * report an extended quiescent state to other CPUs that started a grace
 * period. Otherwise we would delay any grace period as long as we run in
 * the idle task.
 *
 * Similarly, we avoid claiming an SRCU read lock held if the current
 * CPU is offline.
 */
int rcu_read_lock_sched_held(void)
{
	int lockdep_opinion = 0;

	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	if (debug_locks)
		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
	return lockdep_opinion || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
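
/*
 * A typical debug-check usage sketch (hypothetical caller, not part of
 * this file): functions that must run with RCU-sched protection can
 * assert it via lockdep, for example:
 *
 *	RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *			 "my_func() needs rcu_read_lock_sched() protection");
 */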
#endif

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts? Intended for use within RCU. Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.
 */
bool rcu_gp_is_normal(void)
{
	return READ_ONCE(rcu_normal);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

static atomic_t rcu_expedited_nesting =
	ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);

/*
 * Should normal grace-period primitives be expedited? Intended for
 * use within RCU. Note that this function takes the rcu_expedited
 * sysfs/boot variable into account as well as the rcu_expedite_gp()
 * nesting. So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
 * returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
	return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited() function
 * had instead been called.
 */
void rcu_expedite_gp(void)
{
	atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp(). If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
	atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
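
/*
 * A usage sketch (hypothetical caller, not part of this file): code that
 * wants fast grace periods during a bounded window brackets that window
 * with the pair above, keeping the calls balanced:
 *
 *	rcu_expedite_gp();
 *	do_latency_sensitive_setup();	(hypothetical work that waits on GPs)
 *	rcu_unexpedite_gp();
 */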

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
	if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
		rcu_unexpedite_gp();
	if (rcu_normal_after_boot)
		WRITE_ONCE(rcu_normal, 1);
}

#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting. If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting != 1) {
		--t->rcu_read_lock_nesting;
	} else {
		barrier();  /* critical section before exit code. */
		t->rcu_read_lock_nesting = INT_MIN;
		barrier();  /* assign before ->rcu_read_unlock_special load */
		if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
			rcu_read_unlock_special(t);
		barrier();  /* ->rcu_read_unlock_special load before assign */
		t->rcu_read_lock_nesting = 0;
	}
#ifdef CONFIG_PROVE_LOCKING
	{
		int rrln = READ_ONCE(t->rcu_read_lock_nesting);

		WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
	}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);

#endif /* #ifdef CONFIG_PREEMPT_RCU */
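
/*
 * A reader-side usage sketch (hypothetical pointer and helper, not part
 * of this file): under CONFIG_PREEMPT_RCU, rcu_read_lock() and
 * rcu_read_unlock() map onto the two functions above:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(my_global_ptr);	(hypothetical RCU-protected pointer)
 *	if (p)
 *		do_something_with(p);		(hypothetical helper)
 *	rcu_read_unlock();
 */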

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
	STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
	return rcu_scheduler_active && debug_locks &&
	       current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise. This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context; for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and the !CONFIG_PROVE_RCU cases. Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation. This is useful for debug checks in functions
 * that require that they be called within an RCU-bh read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	if (!rcu_is_watching())
		return 0;
	if (!rcu_lockdep_current_cpu_online())
		return 0;
	return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
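
/*
 * A usage sketch (hypothetical pointer and lock, not part of this file):
 * the _held() helpers above are typically consumed by lockdep-checked
 * dereferences such as:
 *
 *	p = rcu_dereference_check(my_ptr,
 *				  rcu_read_lock_held() ||
 *				  lockdep_is_held(&my_lock));
 */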

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;

	rcu = container_of(head, struct rcu_synchronize, head);
	complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
		   struct rcu_synchronize *rs_array)
{
	int i;

	/* Initialize and register callbacks for each flavor specified. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh)) {
			might_sleep();
			continue;
		}
		init_rcu_head_on_stack(&rs_array[i].head);
		init_completion(&rs_array[i].completion);
		(crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
	}

	/* Wait for all callbacks to be invoked. */
	for (i = 0; i < n; i++) {
		if (checktiny &&
		    (crcu_array[i] == call_rcu ||
		     crcu_array[i] == call_rcu_bh))
			continue;
		wait_for_completion(&rs_array[i].completion);
		destroy_rcu_head_on_stack(&rs_array[i].head);
	}
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
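
/*
 * Callers normally reach __wait_rcu_gp() through the wait_rcu_gp() and
 * synchronize_rcu_mult() wrappers rather than directly; for example,
 * synchronize_rcu_tasks() later in this file simply does
 * wait_rcu_gp(call_rcu_tasks). Roughly, the wrapper builds the arrays
 * for one or more flavors and then calls the function above:
 *
 *	call_rcu_func_t crcu_array[] = { call_rcu };
 *	struct rcu_synchronize rs_array[ARRAY_SIZE(crcu_array)];
 *
 *	__wait_rcu_gp(false, ARRAY_SIZE(crcu_array), crcu_array, rs_array);
 */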

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
	debug_object_init(head, &rcuhead_debug_descr);
}

void destroy_rcu_head(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}

static bool rcuhead_is_static_object(void *addr)
{
	return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack. This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap. This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope. As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap. Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
	debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
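
/*
 * A minimal usage sketch (not part of this file): any caller that places
 * an rcu_head on its stack and waits for the callback should bracket the
 * head's lifetime with the two helpers above so that
 * CONFIG_DEBUG_OBJECTS_RCU_HEAD can track it:
 *
 *	struct rcu_synchronize rs;
 *
 *	init_rcu_head_on_stack(&rs.head);
 *	init_completion(&rs.completion);
 *	call_rcu(&rs.head, wakeme_after_rcu);
 *	wait_for_completion(&rs.completion);
 *	destroy_rcu_head_on_stack(&rs.head);
 */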

struct debug_obj_descr rcuhead_debug_descr = {
	.name = "rcu_head",
	.is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old, unsigned long c)
{
	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif

#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA	(5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA	0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
	int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);

	/*
	 * Limit check must be consistent with the Kconfig limits
	 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
	 */
	if (till_stall_check < 3) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 3);
		till_stall_check = 3;
	} else if (till_stall_check > 300) {
		WRITE_ONCE(rcu_cpu_stall_timeout, 300);
		till_stall_check = 300;
	}
	return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
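
/*
 * A worked example (assuming the common Kconfig default of 21 seconds and
 * HZ=1000): rcu_jiffies_till_stall_check() returns 21 * 1000 = 21000
 * jiffies, plus a further 5 * HZ of slack when CONFIG_PROVE_RCU is set.
 */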

void rcu_sysrq_start(void)
{
	if (!rcu_cpu_stall_suppress)
		rcu_cpu_stall_suppress = 2;
}

void rcu_sysrq_end(void)
{
	if (rcu_cpu_stall_suppress == 2)
		rcu_cpu_stall_suppress = 0;
}

static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
	rcu_cpu_stall_suppress = 1;
	return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
	.notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
	atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
	return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

#ifdef CONFIG_TASKS_RCU

/*
 * Simple variant of RCU whose quiescent states are voluntary context switch,
 * user-space execution, and idle. As such, grace periods can take one good
 * long time. There are no read-side primitives similar to rcu_read_lock()
 * and rcu_read_unlock() because this implementation is intended to get
 * the system into a safe state for some of the manipulations involved in
 * tracing and the like. Finally, this implementation does not support
 * high call_rcu_tasks() rates from multiple CPUs. If this is required,
 * per-CPU callback lists will be needed.
 */

/* Global list of callbacks and associated lock. */
static struct rcu_head *rcu_tasks_cbs_head;
static struct rcu_head **rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);

/* Track exiting tasks in order to allow them to be waited for. */
DEFINE_SRCU(tasks_rcu_exit_srcu);

/* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
static int rcu_task_stall_timeout __read_mostly = HZ * 60 * 10;
module_param(rcu_task_stall_timeout, int, 0644);

static void rcu_spawn_tasks_kthread(void);
static struct task_struct *rcu_tasks_kthread_ptr;

/*
 * Post an RCU-tasks callback. First call must be from process context
 * after the scheduler is fully operational.
 */
void call_rcu_tasks(struct rcu_head *rhp, rcu_callback_t func)
{
	unsigned long flags;
	bool needwake;
	bool havetask = READ_ONCE(rcu_tasks_kthread_ptr);

	rhp->next = NULL;
	rhp->func = func;
	raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
	needwake = !rcu_tasks_cbs_head;
	*rcu_tasks_cbs_tail = rhp;
	rcu_tasks_cbs_tail = &rhp->next;
	raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);
	/* We can't create the thread unless interrupts are enabled. */
	if ((needwake && havetask) ||
	    (!havetask && !irqs_disabled_flags(flags))) {
		rcu_spawn_tasks_kthread();
		wake_up(&rcu_tasks_cbs_wq);
	}
}
EXPORT_SYMBOL_GPL(call_rcu_tasks);
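
/*
 * A usage sketch (hypothetical structure and callback, not part of this
 * file): the callback typically recovers its enclosing structure with
 * container_of() and frees it once no task can still be executing in the
 * code it protects:
 *
 *	struct my_trampoline {
 *		struct rcu_head rh;
 *		...
 *	};
 *
 *	static void my_free_trampoline(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_trampoline, rh));
 *	}
 *
 *	call_rcu_tasks(&tramp->rh, my_free_trampoline);
 */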

/**
 * synchronize_rcu_tasks - wait until an rcu-tasks grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-tasks
 * grace period has elapsed, in other words after all currently
 * executing rcu-tasks read-side critical sections have completed. These
 * read-side critical sections are delimited by calls to schedule(),
 * cond_resched_rcu_qs(), idle execution, userspace execution, calls
 * to synchronize_rcu_tasks(), and (in theory, anyway) cond_resched().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function
 * preambles and profiling hooks. The synchronize_rcu_tasks() function
 * is not (yet) intended for heavy use from multiple CPUs.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_rcu_tasks() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-tasks read-side critical section whose beginning
 * preceded the call to synchronize_rcu_tasks(). In addition, each CPU
 * having an RCU-tasks read-side critical section that extends beyond
 * the return from synchronize_rcu_tasks() is guaranteed to have executed
 * a full memory barrier after the beginning of synchronize_rcu_tasks()
 * and before the beginning of that RCU-tasks read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_rcu_tasks(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_rcu_tasks() -- even if CPU A and CPU B are the same CPU
 * (but again only if the system has more than one CPU).
 */
void synchronize_rcu_tasks(void)
{
	/* Complain if the scheduler has not started. */
	RCU_LOCKDEP_WARN(!rcu_scheduler_active,
			 "synchronize_rcu_tasks called too soon");

	/* Wait for the grace period. */
	wait_rcu_gp(call_rcu_tasks);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_tasks);
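
/*
 * A synchronous usage sketch (hypothetical helpers, not part of this
 * file): code tearing down a tracing trampoline can wait in place instead
 * of posting a callback with call_rcu_tasks():
 *
 *	unhook_trampoline(tramp);	(hypothetical: make it unreachable)
 *	synchronize_rcu_tasks();	(no task still runs inside it)
 *	kfree(tramp);
 */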

/**
 * rcu_barrier_tasks - Wait for in-flight call_rcu_tasks() callbacks.
 *
 * Although the current implementation is guaranteed to wait, it is not
 * obligated to, for example, if there are no pending callbacks.
 */
void rcu_barrier_tasks(void)
{
	/* There is only one callback queue, so this is easy. ;-) */
	synchronize_rcu_tasks();
}
EXPORT_SYMBOL_GPL(rcu_barrier_tasks);

/* See if tasks are still holding out, complain if so. */
static void check_holdout_task(struct task_struct *t,
			       bool needreport, bool *firstreport)
{
	int cpu;

	if (!READ_ONCE(t->rcu_tasks_holdout) ||
	    t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
	    !READ_ONCE(t->on_rq) ||
	    (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
	     !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
		WRITE_ONCE(t->rcu_tasks_holdout, false);
		list_del_init(&t->rcu_tasks_holdout_list);
		put_task_struct(t);
		return;
	}
	if (!needreport)
		return;
	if (*firstreport) {
		pr_err("INFO: rcu_tasks detected stalls on tasks:\n");
		*firstreport = false;
	}
	cpu = task_cpu(t);
	pr_alert("%p: %c%c nvcsw: %lu/%lu holdout: %d idle_cpu: %d/%d\n",
		 t, ".I"[is_idle_task(t)],
		 "N."[cpu < 0 || !tick_nohz_full_cpu(cpu)],
		 t->rcu_tasks_nvcsw, t->nvcsw, t->rcu_tasks_holdout,
		 t->rcu_tasks_idle_cpu, cpu);
	sched_show_task(t);
}

/* RCU-tasks kthread that detects grace periods and invokes callbacks. */
static int __noreturn rcu_tasks_kthread(void *arg)
{
	unsigned long flags;
	struct task_struct *g, *t;
	unsigned long lastreport;
	struct rcu_head *list;
	struct rcu_head *next;
	LIST_HEAD(rcu_tasks_holdouts);

	/* Run on housekeeping CPUs by default. Sysadm can move if desired. */
	housekeeping_affine(current);

	/*
	 * Each pass through the following loop makes one check for
	 * newly arrived callbacks, and, if there are some, waits for
	 * one RCU-tasks grace period and then invokes the callbacks.
	 * This loop is terminated by the system going down. ;-)
	 */
	for (;;) {

		/* Pick up any new callbacks. */
		raw_spin_lock_irqsave(&rcu_tasks_cbs_lock, flags);
		list = rcu_tasks_cbs_head;
		rcu_tasks_cbs_head = NULL;
		rcu_tasks_cbs_tail = &rcu_tasks_cbs_head;
		raw_spin_unlock_irqrestore(&rcu_tasks_cbs_lock, flags);

		/* If there were none, wait a bit and start over. */
		if (!list) {
			wait_event_interruptible(rcu_tasks_cbs_wq,
						 rcu_tasks_cbs_head);
			if (!rcu_tasks_cbs_head) {
				WARN_ON(signal_pending(current));
				schedule_timeout_interruptible(HZ/10);
			}
			continue;
		}

		/*
		 * Wait for all pre-existing t->on_rq and t->nvcsw
		 * transitions to complete. Invoking synchronize_sched()
		 * suffices because all these transitions occur with
		 * interrupts disabled. Without this synchronize_sched(),
		 * a read-side critical section that started before the
		 * grace period might be incorrectly seen as having started
		 * after the grace period.
		 *
		 * This synchronize_sched() also dispenses with the
		 * need for a memory barrier on the first store to
		 * ->rcu_tasks_holdout, as it forces the store to happen
		 * after the beginning of the grace period.
		 */
		synchronize_sched();

		/*
		 * There were callbacks, so we need to wait for an
		 * RCU-tasks grace period. Start off by scanning
		 * the task list for tasks that are not already
		 * voluntarily blocked. Mark these tasks and make
		 * a list of them in rcu_tasks_holdouts.
		 */
		rcu_read_lock();
		for_each_process_thread(g, t) {
			if (t != current && READ_ONCE(t->on_rq) &&
			    !is_idle_task(t)) {
				get_task_struct(t);
				t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
				WRITE_ONCE(t->rcu_tasks_holdout, true);
				list_add(&t->rcu_tasks_holdout_list,
					 &rcu_tasks_holdouts);
			}
		}
		rcu_read_unlock();

		/*
		 * Wait for tasks that are in the process of exiting.
		 * This does only part of the job, ensuring that all
		 * tasks that were previously exiting reach the point
		 * where they have disabled preemption, allowing the
		 * later synchronize_sched() to finish the job.
		 */
		synchronize_srcu(&tasks_rcu_exit_srcu);

		/*
		 * Each pass through the following loop scans the list
		 * of holdout tasks, removing any that are no longer
		 * holdouts. When the list is empty, we are done.
		 */
		lastreport = jiffies;
		while (!list_empty(&rcu_tasks_holdouts)) {
			bool firstreport;
			bool needreport;
			int rtst;
			struct task_struct *t1;

			schedule_timeout_interruptible(HZ);
			rtst = READ_ONCE(rcu_task_stall_timeout);
			needreport = rtst > 0 &&
				     time_after(jiffies, lastreport + rtst);
			if (needreport)
				lastreport = jiffies;
			firstreport = true;
			WARN_ON(signal_pending(current));
			list_for_each_entry_safe(t, t1, &rcu_tasks_holdouts,
						 rcu_tasks_holdout_list) {
				check_holdout_task(t, needreport, &firstreport);
				cond_resched();
			}
		}

		/*
		 * Because ->on_rq and ->nvcsw are not guaranteed
		 * to have full memory barriers prior to them in the
		 * schedule() path, memory reordering on other CPUs could
		 * cause their RCU-tasks read-side critical sections to
		 * extend past the end of the grace period. However,
		 * because these ->nvcsw updates are carried out with
		 * interrupts disabled, we can use synchronize_sched()
		 * to force the needed ordering on all such CPUs.
		 *
		 * This synchronize_sched() also confines all
		 * ->rcu_tasks_holdout accesses to be within the grace
		 * period, avoiding the need for memory barriers for
		 * ->rcu_tasks_holdout accesses.
		 *
		 * In addition, this synchronize_sched() waits for exiting
		 * tasks to complete their final preempt_disable() region
		 * of execution, cleaning up after the synchronize_srcu()
		 * above.
		 */
		synchronize_sched();

		/* Invoke the callbacks. */
		while (list) {
			next = list->next;
			local_bh_disable();
			list->func(list);
			local_bh_enable();
			list = next;
			cond_resched();
		}
		schedule_timeout_uninterruptible(HZ/10);
	}
}

/* Spawn rcu_tasks_kthread() at first call to call_rcu_tasks(). */
static void rcu_spawn_tasks_kthread(void)
{
	static DEFINE_MUTEX(rcu_tasks_kthread_mutex);
	struct task_struct *t;

	if (READ_ONCE(rcu_tasks_kthread_ptr)) {
		smp_mb(); /* Ensure caller sees full kthread. */
		return;
	}
	mutex_lock(&rcu_tasks_kthread_mutex);
	if (rcu_tasks_kthread_ptr) {
		mutex_unlock(&rcu_tasks_kthread_mutex);
		return;
	}
	t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
	BUG_ON(IS_ERR(t));
	smp_mb(); /* Ensure others see full kthread. */
	WRITE_ONCE(rcu_tasks_kthread_ptr, t);
	mutex_unlock(&rcu_tasks_kthread_mutex);
}

#endif /* #ifdef CONFIG_TASKS_RCU */

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters, one for each flavor
 */
static bool rcu_self_test;
static bool rcu_self_test_bh;
static bool rcu_self_test_sched;

module_param(rcu_self_test, bool, 0444);
module_param(rcu_self_test_bh, bool, 0444);
module_param(rcu_self_test_sched, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
	rcu_self_test_counter++;
	pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

static void early_boot_test_call_rcu(void)
{
	static struct rcu_head head;

	call_rcu(&head, test_callback);
}

static void early_boot_test_call_rcu_bh(void)
{
	static struct rcu_head head;

	call_rcu_bh(&head, test_callback);
}

static void early_boot_test_call_rcu_sched(void)
{
	static struct rcu_head head;

	call_rcu_sched(&head, test_callback);
}

void rcu_early_boot_tests(void)
{
	pr_info("Running RCU self tests\n");

	if (rcu_self_test)
		early_boot_test_call_rcu();
	if (rcu_self_test_bh)
		early_boot_test_call_rcu_bh();
	if (rcu_self_test_sched)
		early_boot_test_call_rcu_sched();
}

static int rcu_verify_early_boot_tests(void)
{
	int ret = 0;
	int early_boot_test_counter = 0;

	if (rcu_self_test) {
		early_boot_test_counter++;
		rcu_barrier();
	}
	if (rcu_self_test_bh) {
		early_boot_test_counter++;
		rcu_barrier_bh();
	}
	if (rcu_self_test_sched) {
		early_boot_test_counter++;
		rcu_barrier_sched();
	}

	if (rcu_self_test_counter != early_boot_test_counter) {
		WARN_ON(1);
		ret = -1;
	}

	return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */