update.c
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *      http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/module.h>

#define CREATE_TRACE_POINTS
#include <trace/events/rcu.h>

#include "rcu.h"

MODULE_ALIAS("rcupdate");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

module_param(rcu_expedited, int, 0);
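/*
 * Example (illustrative, not part of the original file): because of the
 * "rcupdate." MODULE_PARAM_PREFIX defined above, this parameter is set
 * on the kernel command line as:
 *
 *      rcupdate.rcu_expedited=1
 */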
#ifdef CONFIG_PREEMPT_RCU

/*
 * Preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting, shared state will be updated
 * if we block.
 */
void __rcu_read_lock(void)
{
        current->rcu_read_lock_nesting++;
        barrier();  /* critical section after entry code. */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting != 1) {
                --t->rcu_read_lock_nesting;
        } else {
                barrier();  /* critical section before exit code. */
                t->rcu_read_lock_nesting = INT_MIN;
#ifdef CONFIG_PROVE_RCU_DELAY
                udelay(10); /* Make preemption more probable. */
#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
                barrier();  /* assign before ->rcu_read_unlock_special load */
                if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                        rcu_read_unlock_special(t);
                barrier();  /* ->rcu_read_unlock_special load before assign */
                t->rcu_read_lock_nesting = 0;
        }
#ifdef CONFIG_PROVE_LOCKING
        {
                int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);

                WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
        }
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
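/*
 * Example (illustrative sketch, not part of the original file): these
 * primitives back the rcu_read_lock()/rcu_read_unlock() pair around a
 * typical read-side critical section.  "gp_ptr", "struct foo", and
 * use() are hypothetical names:
 *
 *      rcu_read_lock();
 *      p = rcu_dereference(gp_ptr);
 *      if (p)
 *              use(p->a);
 *      rcu_read_unlock();
 */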
#endif /* #ifdef CONFIG_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_bh", &rcu_bh_lock_key);
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key);
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);

int notrace debug_lockdep_rcu_enabled(void)
{
        return rcu_scheduler_active && debug_locks &&
               current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and the !CONFIG_PROVE_RCU cases.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will show the situation.  This is useful for debug checks in functions
 * that require that they be called within an RCU read-side critical
 * section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
        if (!debug_lockdep_rcu_enabled())
                return 1;
        if (!rcu_is_watching())
                return 0;
        if (!rcu_lockdep_current_cpu_online())
                return 0;
        return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
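/*
 * Example (illustrative, not part of the original file): a function that
 * must run inside an RCU-bh read-side critical section might assert so
 * via lockdep.  "my_bh_lookup" is a hypothetical name, and the macro
 * name assumes the rcu_lockdep_assert() of this kernel era:
 *
 *      rcu_lockdep_assert(rcu_read_lock_bh_held(),
 *                         "my_bh_lookup() needs rcu_read_lock_bh()!");
 */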
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

void wait_rcu_gp(call_rcu_func_t crf)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        crf(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);
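/*
 * Example (illustrative, not part of the original file): callers pass
 * the call_rcu() variant for their RCU flavor, so a synchronous
 * grace-period wait can be built as, e.g.:
 *
 *      wait_rcu_gp(call_rcu);          (as in synchronize_rcu())
 *      wait_rcu_gp(call_rcu_bh);       (as in synchronize_rcu_bh())
 */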
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
static inline void debug_init_rcu_head(struct rcu_head *head)
{
        debug_object_init(head, &rcuhead_debug_descr);
}

static inline void debug_rcu_head_free(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 * Activation is performed internally by call_rcu().
 */
static int rcuhead_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct rcu_head *head = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup. We just make sure that it is
                 * tracked in the object tracker.
                 */
                debug_object_init(head, &rcuhead_debug_descr);
                debug_object_activate(head, &rcuhead_debug_descr);
                return 0;
        default:
                return 1;
        }
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
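/*
 * Example (illustrative, not part of the original file): the on-stack
 * pattern that these two functions bracket, as used by wait_rcu_gp()
 * above.  "my_cb" is a hypothetical callback:
 *
 *      struct rcu_head rh;
 *
 *      init_rcu_head_on_stack(&rh);
 *      call_rcu(&rh, my_cb);
 *      ... block until my_cb() has run ...
 *      destroy_rcu_head_on_stack(&rh);
 */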
struct debug_obj_descr rcuhead_debug_descr = {
        .name = "rcu_head",
        .fixup_activate = rcuhead_fixup_activate,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old, unsigned long c)
{
        trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif
#ifdef CONFIG_RCU_STALL_COMMON

#ifdef CONFIG_PROVE_RCU
#define RCU_STALL_DELAY_DELTA          (5 * HZ)
#else
#define RCU_STALL_DELAY_DELTA          0
#endif

int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
static int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;

module_param(rcu_cpu_stall_suppress, int, 0644);
module_param(rcu_cpu_stall_timeout, int, 0644);

int rcu_jiffies_till_stall_check(void)
{
        int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);

        /*
         * Limit check must be consistent with the Kconfig limits
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
                ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
                ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
}
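/*
 * Worked example (illustrative, not part of the original file): the
 * timeout is clamped to [3, 300] seconds and converted to jiffies.
 * Assuming rcu_cpu_stall_timeout = 21 and HZ = 1000, the return value
 * is 21 * 1000 = 21000 jiffies, plus 5 * HZ = 5000 more when
 * CONFIG_PROVE_RCU makes RCU_STALL_DELAY_DELTA non-zero.
 */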
static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
{
        rcu_cpu_stall_suppress = 1;
        return NOTIFY_DONE;
}

static struct notifier_block rcu_panic_block = {
        .notifier_call = rcu_panic,
};

static int __init check_cpu_stall_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &rcu_panic_block);
        return 0;
}
early_initcall(check_cpu_stall_init);

#endif /* #ifdef CONFIG_RCU_STALL_COMMON */