srcu.c

/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Authors: Paul McKenney <paulmck@us.ibm.com>
 *          Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *         Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include "rcu.h"

/*
 * Initialize an rcu_batch structure to empty.
 */
static inline void rcu_batch_init(struct rcu_batch *b)
{
        b->head = NULL;
        b->tail = &b->head;
}

/*
 * Enqueue a callback onto the tail of the specified rcu_batch structure.
 */
static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
        *b->tail = head;
        b->tail = &head->next;
}

/*
 * Is the specified rcu_batch structure empty?
 */
static inline bool rcu_batch_empty(struct rcu_batch *b)
{
        return b->tail == &b->head;
}

/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
        struct rcu_head *head;

        if (rcu_batch_empty(b))
                return NULL;

        head = b->head;
        b->head = head->next;
        if (b->tail == &head->next)
                rcu_batch_init(b);

        return head;
}

/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
        if (!rcu_batch_empty(from)) {
                *to->tail = from->head;
                to->tail = from->tail;
                rcu_batch_init(from);
        }
}
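
/*
 * Editorial usage sketch, not part of the original file: exercise the
 * rcu_batch helpers above.  The ->tail pointer always addresses the
 * next ->next slot to fill, so an empty batch is one whose ->tail
 * still points at its own ->head.  The function name is hypothetical.
 */
static void __maybe_unused rcu_batch_example(void)
{
        struct rcu_batch b;
        struct rcu_head h = { };

        rcu_batch_init(&b);                     /* b.tail == &b.head: empty */
        BUG_ON(!rcu_batch_empty(&b));
        rcu_batch_queue(&b, &h);                /* b.head == &h, b.tail == &h.next */
        BUG_ON(rcu_batch_empty(&b));
        BUG_ON(rcu_batch_dequeue(&b) != &h);    /* dequeue re-empties b */
        BUG_ON(!rcu_batch_empty(&b));
}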

static int init_srcu_struct_fields(struct srcu_struct *sp)
{
        sp->completed = 0;
        spin_lock_init(&sp->queue_lock);
        sp->running = false;
        rcu_batch_init(&sp->batch_queue);
        rcu_batch_init(&sp->batch_check0);
        rcu_batch_init(&sp->batch_check1);
        rcu_batch_init(&sp->batch_done);
        INIT_DELAYED_WORK(&sp->work, process_srcu);
        sp->per_cpu_ref = alloc_percpu(struct srcu_struct_array);
        return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
                       struct lock_class_key *key)
{
        /* Don't re-initialize a lock while it is held. */
        debug_check_no_locks_freed((void *)sp, sizeof(*sp));
        lockdep_init_map(&sp->dep_map, name, key, 0);
        return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
        return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
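
/*
 * Editorial lifecycle sketch, not part of the original file: one
 * srcu_struct per SRCU domain, initialized before first use and
 * cleaned up (see cleanup_srcu_struct() below) only after all readers
 * and callbacks are done.  All "example_*" names here and in the
 * sketches further down are hypothetical.
 */
static struct srcu_struct example_srcu;

static int __maybe_unused example_init(void)
{
        return init_srcu_struct(&example_srcu); /* -ENOMEM on failure */
}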

/*
 * Returns approximate total of the readers' ->seq[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
{
        int cpu;
        unsigned long sum = 0;
        unsigned long t;

        for_each_possible_cpu(cpu) {
                t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
                sum += t;
        }
        return sum;
}

/*
 * Returns approximate number of readers active on the specified rank
 * of the per-CPU ->c[] counters.
 */
static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
{
        int cpu;
        unsigned long sum = 0;
        unsigned long t;

        for_each_possible_cpu(cpu) {
                t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
                sum += t;
        }
        return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be stably zero.  An example unstable zero can occur if the call
 * to srcu_readers_active_idx() misses an __srcu_read_lock() increment,
 * but due to task migration, sees the corresponding __srcu_read_unlock()
 * decrement.  This can happen because srcu_readers_active_idx() takes
 * time to sum the array, and might in fact be interrupted or preempted
 * partway through the summation.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
        unsigned long seq;

        seq = srcu_readers_seq_idx(sp, idx);

        /*
         * The following smp_mb() A pairs with the smp_mb() B located in
         * __srcu_read_lock().  This pairing ensures that if an
         * __srcu_read_lock() increments its counter after the summation
         * in srcu_readers_active_idx(), then the corresponding SRCU read-side
         * critical section will see any changes made prior to the start
         * of the current SRCU grace period.
         *
         * Also, if the above call to srcu_readers_seq_idx() saw the
         * increment of ->seq[], then the call to srcu_readers_active_idx()
         * must see the increment of ->c[].
         */
        smp_mb(); /* A */

        /*
         * Note that srcu_readers_active_idx() can incorrectly return
         * zero even though there is a pre-existing reader throughout.
         * To see this, suppose that task A is in a very long SRCU
         * read-side critical section that started on CPU 0, and that
         * no other reader exists, so that the sum of the counters
         * is equal to one.  Then suppose that task B starts executing
         * srcu_readers_active_idx(), summing up to CPU 1, and then that
         * task C starts reading on CPU 0, so that its increment is not
         * summed, but finishes reading on CPU 2, so that its decrement
         * -is- summed.  Then when task B completes its sum, it will
         * incorrectly get zero, despite the fact that task A has been
         * in its SRCU read-side critical section the whole time.
         *
         * We therefore do a validation step should srcu_readers_active_idx()
         * return zero.
         */
        if (srcu_readers_active_idx(sp, idx) != 0)
                return false;

        /*
         * The remainder of this function is the validation step.
         * The following smp_mb() D pairs with the smp_mb() C in
         * __srcu_read_unlock().  If the __srcu_read_unlock() was seen
         * by srcu_readers_active_idx() above, then any destructive
         * operation performed after the grace period will happen after
         * the corresponding SRCU read-side critical section.
         *
         * Note that there can be at most NR_CPUS worth of readers using
         * the old index, which is not enough to overflow even a 32-bit
         * integer.  (Yes, this does mean that systems having more than
         * a billion or so CPUs need to be 64-bit systems.)  Therefore,
         * the sum of the ->seq[] counters cannot possibly overflow.
         * Therefore, the only way that the return values of the two
         * calls to srcu_readers_seq_idx() can be equal is if there were
         * no increments of the corresponding rank of ->seq[] counts
         * in the interim.  But the missed-increment scenario laid out
         * above includes an increment of the ->seq[] counter by
         * the corresponding __srcu_read_lock().  Therefore, if this
         * scenario occurs, the return values from the two calls to
         * srcu_readers_seq_idx() will differ, and thus the validation
         * step below suffices.
         */
        smp_mb(); /* D */

        return srcu_readers_seq_idx(sp, idx) == seq;
}

/**
 * srcu_readers_active - returns approximate number of readers.
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static int srcu_readers_active(struct srcu_struct *sp)
{
        int cpu;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
                sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
        }
        return sum;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
        if (WARN_ON(srcu_readers_active(sp)))
                return; /* Leakage unless caller handles error. */
        free_percpu(sp->per_cpu_ref);
        sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
        int idx;

        idx = ACCESS_ONCE(sp->completed) & 0x1;
        preempt_disable();
        __this_cpu_inc(sp->per_cpu_ref->c[idx]);
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
        __this_cpu_inc(sp->per_cpu_ref->seq[idx]);
        preempt_enable();
        return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
        smp_mb(); /* C */  /* Avoid leaking the critical section. */
        this_cpu_dec(sp->per_cpu_ref->c[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
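
/*
 * Editorial reader-side sketch, not part of the original file, reusing
 * the hypothetical example_srcu domain from above.  Real callers use
 * the srcu_read_lock()/srcu_read_unlock() wrappers from <linux/srcu.h>,
 * which add lockdep tracking around the two functions above.
 */
struct example_data {
        int val;
        struct rcu_head rh;
};
static struct example_data __rcu *example_ptr;

static int __maybe_unused example_reader(void)
{
        struct example_data *p;
        int idx, val = -1;

        idx = srcu_read_lock(&example_srcu);
        p = srcu_dereference(example_ptr, &example_srcu);
        if (p)
                val = p->val;   /* p remains valid until the unlock */
        srcu_read_unlock(&example_srcu, idx);
        return val;
}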

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds
 * (SYNCHRONIZE_SRCU_TRYCOUNT checks separated by SRCU_RETRY_CHECK_DELAY
 * microseconds of delay), we repeatedly block for 1-millisecond time
 * periods.  This approach has done well in testing, so there is no need
 * for a config parameter.
 */
#define SRCU_RETRY_CHECK_DELAY          5
#define SYNCHRONIZE_SRCU_TRYCOUNT       2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT   12

/*
 * Wait until all pre-existing readers complete.  Such readers
 * will have used the index specified by "idx".
 * The caller must ensure that ->completed is not changed while checking,
 * and that idx == (->completed & 1) ^ 1.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
        for (;;) {
                if (srcu_readers_active_idx_check(sp, idx))
                        return true;
                if (--trycount <= 0)
                        return false;
                udelay(SRCU_RETRY_CHECK_DELAY);
        }
}

/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->c[] and ->seq[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
        sp->completed++;
}

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
               void (*func)(struct rcu_head *head))
{
        unsigned long flags;

        head->next = NULL;
        head->func = func;
        spin_lock_irqsave(&sp->queue_lock, flags);
        rcu_batch_queue(&sp->batch_queue, head);
        if (!sp->running) {
                sp->running = true;
                queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
        }
        spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);
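
/*
 * Editorial caller-side sketch for call_srcu(), not part of the original
 * file, continuing the hypothetical example_* names from above:
 * unpublish the structure, then let the callback reclaim it once all
 * pre-existing readers are done.  (kfree() would additionally require
 * <linux/slab.h>.)
 */
static void example_free_cb(struct rcu_head *rh)
{
        kfree(container_of(rh, struct example_data, rh));
}

static void __maybe_unused example_retire(struct example_data *oldp)
{
        RCU_INIT_POINTER(example_ptr, NULL);    /* no new readers find oldp */
        call_srcu(&example_srcu, &oldp->rh, example_free_cb);
}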

struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/*
 * Awaken the corresponding synchronize_srcu() instance now that a
 * grace period has elapsed.
 */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}

static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
        struct rcu_synchronize rcu;
        struct rcu_head *head = &rcu.head;
        bool done = false;

        rcu_lockdep_assert(!lock_is_held(&sp->dep_map) &&
                           !lock_is_held(&rcu_bh_lock_map) &&
                           !lock_is_held(&rcu_lock_map) &&
                           !lock_is_held(&rcu_sched_lock_map),
                           "Illegal synchronize_srcu() in same-type SRCU (or RCU) read-side critical section");

        might_sleep();
        init_completion(&rcu.completion);

        head->next = NULL;
        head->func = wakeme_after_rcu;
        spin_lock_irq(&sp->queue_lock);
        if (!sp->running) {
                /* steal the processing owner */
                sp->running = true;
                rcu_batch_queue(&sp->batch_check0, head);
                spin_unlock_irq(&sp->queue_lock);

                srcu_advance_batches(sp, trycount);
                if (!rcu_batch_empty(&sp->batch_done)) {
                        BUG_ON(sp->batch_done.head != head);
                        rcu_batch_dequeue(&sp->batch_done);
                        done = true;
                }
                /* give the processing owner to work_struct */
                srcu_reschedule(sp);
        } else {
                rcu_batch_queue(&sp->batch_queue, head);
                spin_unlock_irq(&sp->queue_lock);
        }

        if (!done)
                wait_for_completion(&rcu.completion);
}

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both index ranks to drain to zero.  To avoid
 * starvation of synchronize_srcu(), it first waits for the count of
 * index = (->completed & 1) ^ 1 to drain to zero, then flips ->completed
 * and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
        __synchronize_srcu(sp, rcu_expedited
                           ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
                           : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
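
/*
 * Editorial synchronous-update sketch, not part of the original file,
 * with the same hypothetical example_* names: replace the pointer, wait
 * for all pre-existing readers, then reclaim the old structure directly
 * rather than via call_srcu().
 */
static void __maybe_unused example_sync_update(struct example_data *newp)
{
        struct example_data *oldp;

        oldp = rcu_dereference_raw(example_ptr);        /* updater-side access */
        rcu_assign_pointer(example_ptr, newp);
        synchronize_srcu(&example_srcu);        /* all readers of oldp are done */
        kfree(oldp);                            /* again assumes <linux/slab.h> */
}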

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
        __synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
        synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
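
/*
 * Editorial teardown-ordering sketch, not part of the original file:
 * before the hypothetical example_srcu domain (or a module owning its
 * callbacks) can go away, all in-flight call_srcu() callbacks must
 * finish.
 */
static void __maybe_unused example_exit(void)
{
        srcu_barrier(&example_srcu);            /* flush pending callbacks */
        cleanup_srcu_struct(&example_srcu);
}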

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
        return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

#define SRCU_CALLBACK_BATCH     10
#define SRCU_INTERVAL           1

/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
        if (!rcu_batch_empty(&sp->batch_queue)) {
                spin_lock_irq(&sp->queue_lock);
                rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
                spin_unlock_irq(&sp->queue_lock);
        }
}

/*
 * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
        int idx = 1 ^ (sp->completed & 1);

        /*
         * Because readers might be delayed for an extended period after
         * fetching ->completed for their index, at any point in time there
         * might well be readers using both idx=0 and idx=1.  We therefore
         * need to wait for readers to clear from both index values before
         * invoking a callback.
         */

        if (rcu_batch_empty(&sp->batch_check0) &&
            rcu_batch_empty(&sp->batch_check1))
                return; /* no callbacks need to be advanced */

        if (!try_check_zero(sp, idx, trycount))
                return; /* failed to advance, will try after SRCU_INTERVAL */

        /*
         * The callbacks in ->batch_check1 already did their first zero
         * check and flip back when they were enqueued on ->batch_check0
         * in a previous invocation of srcu_advance_batches().
         * (Presumably try_check_zero() returned false during that
         * invocation, leaving the callbacks stranded on ->batch_check1.)
         * They are therefore ready to invoke, so move them to ->batch_done.
         */
        rcu_batch_move(&sp->batch_done, &sp->batch_check1);

        if (rcu_batch_empty(&sp->batch_check0))
                return; /* no callbacks need to be advanced */
        srcu_flip(sp);

        /*
         * The callbacks in ->batch_check0 just finished their
         * first zero check and flip, so move them to ->batch_check1
         * for future checking on the other idx.
         */
        rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

        /*
         * SRCU read-side critical sections are normally short, so check
         * at least twice in quick succession after a flip.
         */
        trycount = trycount < 2 ? 2 : trycount;
        if (!try_check_zero(sp, idx^1, trycount))
                return; /* failed to advance, will try after SRCU_INTERVAL */

        /*
         * The callbacks in ->batch_check1 have now waited for all
         * pre-existing readers using both idx values.  They are therefore
         * ready to invoke, so move them to ->batch_done.
         */
        rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}
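
/*
 * Editorial summary of the callback pipeline driven by the function
 * above (not part of the original file):
 *
 *      batch_queue --> batch_check0 --> batch_check1 --> batch_done
 *      (call_srcu)     (awaiting 1st    (awaiting 2nd    (invoked by
 *                       index drain)     index drain)     workqueue)
 *
 * A callback must see readers drain on both index values because a
 * reader may have fetched either value of ->completed just before a
 * flip.
 */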

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
        int i;
        struct rcu_head *head;

        for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
                head = rcu_batch_dequeue(&sp->batch_done);
                if (!head)
                        break;
                local_bh_disable();
                head->func(head);
                local_bh_enable();
        }
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
        bool pending = true;

        if (rcu_batch_empty(&sp->batch_done) &&
            rcu_batch_empty(&sp->batch_check1) &&
            rcu_batch_empty(&sp->batch_check0) &&
            rcu_batch_empty(&sp->batch_queue)) {
                spin_lock_irq(&sp->queue_lock);
                if (rcu_batch_empty(&sp->batch_done) &&
                    rcu_batch_empty(&sp->batch_check1) &&
                    rcu_batch_empty(&sp->batch_check0) &&
                    rcu_batch_empty(&sp->batch_queue)) {
                        sp->running = false;
                        pending = false;
                }
                spin_unlock_irq(&sp->queue_lock);
        }

        if (pending)
                queue_delayed_work(system_power_efficient_wq,
                                   &sp->work, SRCU_INTERVAL);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
        struct srcu_struct *sp;

        sp = container_of(work, struct srcu_struct, work.work);

        srcu_collect_new(sp);
        srcu_advance_batches(sp, 1);
        srcu_invoke_callbacks(sp);
        srcu_reschedule(sp);
}
EXPORT_SYMBOL_GPL(process_srcu);