srcu.c
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *         Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU/ *.txt
 *
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include "rcu.h"
/*
 * Initialize an rcu_batch structure to empty.
 */
static inline void rcu_batch_init(struct rcu_batch *b)
{
        b->head = NULL;
        b->tail = &b->head;
}

/*
 * Enqueue a callback onto the tail of the specified rcu_batch structure.
 */
static inline void rcu_batch_queue(struct rcu_batch *b, struct rcu_head *head)
{
        *b->tail = head;
        b->tail = &head->next;
}

/*
 * Is the specified rcu_batch structure empty?
 */
static inline bool rcu_batch_empty(struct rcu_batch *b)
{
        return b->tail == &b->head;
}

/*
 * Remove the callback at the head of the specified rcu_batch structure
 * and return a pointer to it, or return NULL if the structure is empty.
 */
static inline struct rcu_head *rcu_batch_dequeue(struct rcu_batch *b)
{
        struct rcu_head *head;

        if (rcu_batch_empty(b))
                return NULL;

        head = b->head;
        b->head = head->next;
        if (b->tail == &head->next)
                rcu_batch_init(b);

        return head;
}

/*
 * Move all callbacks from the rcu_batch structure specified by "from" to
 * the structure specified by "to".
 */
static inline void rcu_batch_move(struct rcu_batch *to, struct rcu_batch *from)
{
        if (!rcu_batch_empty(from)) {
                *to->tail = from->head;
                to->tail = from->tail;
                rcu_batch_init(from);
        }
}
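
/*
 * Illustrative note (not part of the original file): rcu_batch is a
 * singly-linked list with a tail pointer that points at the last ->next
 * field (or at ->head when empty), so enqueue is O(1) and the empty test
 * is simply "b->tail == &b->head".  A minimal sketch of the idiom,
 * assuming a hypothetical batch "b" and rcu_head callbacks "a" and "c":
 *
 *      rcu_batch_init(&b);             // b.head == NULL, b.tail == &b.head
 *      rcu_batch_queue(&b, &a);        // b.head == &a,   b.tail == &a.next
 *      rcu_batch_queue(&b, &c);        // a.next == &c,   b.tail == &c.next
 *      rcu_batch_dequeue(&b);          // returns &a;     b.head == &c
 */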
static int init_srcu_struct_fields(struct srcu_struct *sp)
{
        sp->completed = 0;
        spin_lock_init(&sp->queue_lock);
        sp->running = false;
        rcu_batch_init(&sp->batch_queue);
        rcu_batch_init(&sp->batch_check0);
        rcu_batch_init(&sp->batch_check1);
        rcu_batch_init(&sp->batch_done);
        INIT_DELAYED_WORK(&sp->work, process_srcu);
        sp->per_cpu_ref = alloc_percpu(struct srcu_array);
        return sp->per_cpu_ref ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
                       struct lock_class_key *key)
{
        /* Don't re-initialize a lock while it is held. */
        debug_check_no_locks_freed((void *)sp, sizeof(*sp));
        lockdep_init_map(&sp->dep_map, name, key, 0);
        return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
        return init_srcu_struct_fields(sp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
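
/*
 * Illustrative sketch (not part of the original file): typical life cycle
 * of a dynamically initialized SRCU domain.  "my_srcu" is a hypothetical
 * srcu_struct owned by the caller.
 *
 *      static struct srcu_struct my_srcu;
 *
 *      if (init_srcu_struct(&my_srcu))
 *              return -ENOMEM;         // per-CPU counter allocation failed
 *      ...
 *      // Only after the last reader and the last callback have finished:
 *      cleanup_srcu_struct(&my_srcu);
 */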
/*
 * Returns approximate total of the readers' ->lock_count[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
        int cpu;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);

                sum += READ_ONCE(cpuc->lock_count[idx]);
        }
        return sum;
}

/*
 * Returns approximate total of the readers' ->unlock_count[] values for the
 * rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
        int cpu;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);

                sum += READ_ONCE(cpuc->unlock_count[idx]);
        }
        return sum;
}
/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
        unsigned long unlocks;

        unlocks = srcu_readers_unlock_idx(sp, idx);

        /*
         * Make sure that a lock is always counted if the corresponding unlock
         * is counted. Needs to be a smp_mb() as the read side may contain a
         * read from a variable that is written to before the synchronize_srcu()
         * in the write side. In this case smp_mb()s A and B act like the store
         * buffering pattern.
         *
         * This smp_mb() also pairs with smp_mb() C to prevent accesses after the
         * synchronize_srcu() from being executed before the grace period ends.
         */
        smp_mb(); /* A */

        /*
         * If the locks are the same as the unlocks, then there must have
         * been no readers on this index at some time in between. This does not
         * mean that there are no more readers, as one could have read the
         * current index but not have incremented the lock counter yet.
         *
         * Possible bug: There is no guarantee that there haven't been ULONG_MAX
         * increments of ->lock_count[] since the unlocks were counted, meaning
         * that this could return true even if there are still active readers.
         * Since there are no memory barriers around srcu_flip(), the CPU is not
         * required to increment ->completed before running
         * srcu_readers_unlock_idx(), which means that there could be an
         * arbitrarily large number of critical sections that execute after
         * srcu_readers_unlock_idx() but use the old value of ->completed.
         */
        return srcu_readers_lock_idx(sp, idx) == unlocks;
}
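
/*
 * Illustrative sketch (not part of the original file) of the store-buffering
 * pattern referred to above.  "x" is a hypothetical variable written by the
 * updater just before it calls synchronize_srcu():
 *
 *      reader                          updater
 *      ------                          -------
 *      lock_count[idx]++;              WRITE_ONCE(x, 1);
 *      smp_mb();       (B)             smp_mb();       (A)
 *      r1 = READ_ONCE(x);              r2 = srcu_readers_lock_idx(sp, idx);
 *
 * With full barriers on both sides, store buffering forbids both loads from
 * missing the other side's store: if the updater's sum (r2) does not include
 * the reader's increment, then the reader must observe x == 1, so that reader
 * began after the update and is not a pre-existing reader to be waited for.
 */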
/**
 * srcu_readers_active - returns true if there are readers, and false
 * otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
        int cpu;
        unsigned long sum = 0;

        for_each_possible_cpu(cpu) {
                struct srcu_array *cpuc = per_cpu_ptr(sp->per_cpu_ref, cpu);

                sum += READ_ONCE(cpuc->lock_count[0]);
                sum += READ_ONCE(cpuc->lock_count[1]);
                sum -= READ_ONCE(cpuc->unlock_count[0]);
                sum -= READ_ONCE(cpuc->unlock_count[1]);
        }
        return sum;
}
/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
        if (WARN_ON(srcu_readers_active(sp)))
                return; /* Leakage unless caller handles error. */
        free_percpu(sp->per_cpu_ref);
        sp->per_cpu_ref = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.  Must be called from process context.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
        int idx;

        idx = READ_ONCE(sp->completed) & 0x1;
        __this_cpu_inc(sp->per_cpu_ref->lock_count[idx]);
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
        return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 * Must be called from process context.
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
        smp_mb(); /* C */  /* Avoid leaking the critical section. */
        this_cpu_inc(sp->per_cpu_ref->unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
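
/*
 * Illustrative sketch (not part of the original file): readers normally use
 * the srcu_read_lock()/srcu_read_unlock() wrappers from <linux/srcu.h>, which
 * call the two functions above and add lockdep annotations.  "my_srcu", "gp",
 * "struct foo", and do_something_with() are hypothetical:
 *
 *      int idx;
 *      struct foo *p;
 *
 *      idx = srcu_read_lock(&my_srcu);
 *      p = srcu_dereference(gp, &my_srcu);
 *      if (p)
 *              do_something_with(p);   // may sleep under SRCU
 *      srcu_read_unlock(&my_srcu, idx);
 */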
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after 10 microseconds,
 * we repeatedly block for 1-millisecond time periods.  This approach
 * has done well in testing, so there is no need for a config parameter.
 */
#define SRCU_RETRY_CHECK_DELAY          5
#define SYNCHRONIZE_SRCU_TRYCOUNT       2
#define SYNCHRONIZE_SRCU_EXP_TRYCOUNT   12

/*
 * @@@ Wait until all pre-existing readers complete.  Such readers
 * will have used the index specified by "idx".
 * The caller should ensure that ->completed is not changed while checking
 * and that idx == (->completed & 1) ^ 1.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
        for (;;) {
                if (srcu_readers_active_idx_check(sp, idx))
                        return true;
                if (--trycount <= 0)
                        return false;
                udelay(SRCU_RETRY_CHECK_DELAY);
        }
}
/*
 * Increment the ->completed counter so that future SRCU readers will
 * use the other rank of the ->(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
        sp->completed++;
}
/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
               rcu_callback_t func)
{
        unsigned long flags;

        head->next = NULL;
        head->func = func;
        spin_lock_irqsave(&sp->queue_lock, flags);
        rcu_batch_queue(&sp->batch_queue, head);
        if (!sp->running) {
                sp->running = true;
                queue_delayed_work(system_power_efficient_wq, &sp->work, 0);
        }
        spin_unlock_irqrestore(&sp->queue_lock, flags);
}
EXPORT_SYMBOL_GPL(call_srcu);
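
/*
 * Illustrative sketch (not part of the original file): a typical call_srcu()
 * user embeds an rcu_head in its own structure and frees that structure from
 * the callback via container_of().  "struct foo", "my_srcu", "p", and
 * foo_free_cb() are hypothetical:
 *
 *      struct foo {
 *              struct rcu_head rh;
 *              ...
 *      };
 *
 *      static void foo_free_cb(struct rcu_head *rhp)
 *      {
 *              kfree(container_of(rhp, struct foo, rh));
 *      }
 *
 *      // After unpublishing "p" so that no new reader can find it:
 *      call_srcu(&my_srcu, &p->rh, foo_free_cb);
 */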
static void srcu_advance_batches(struct srcu_struct *sp, int trycount);
static void srcu_reschedule(struct srcu_struct *sp);

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, int trycount)
{
        struct rcu_synchronize rcu;
        struct rcu_head *head = &rcu.head;
        bool done = false;

        RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
                         lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

        might_sleep();
        init_completion(&rcu.completion);

        head->next = NULL;
        head->func = wakeme_after_rcu;
        spin_lock_irq(&sp->queue_lock);
        if (!sp->running) {
                /* steal the processing owner */
                sp->running = true;
                rcu_batch_queue(&sp->batch_check0, head);
                spin_unlock_irq(&sp->queue_lock);

                srcu_advance_batches(sp, trycount);
                if (!rcu_batch_empty(&sp->batch_done)) {
                        BUG_ON(sp->batch_done.head != head);
                        rcu_batch_dequeue(&sp->batch_done);
                        done = true;
                }
                /* give the processing owner to work_struct */
                srcu_reschedule(sp);
        } else {
                rcu_batch_queue(&sp->batch_queue, head);
                spin_unlock_irq(&sp->queue_lock);
        }

        if (!done)
                wait_for_completion(&rcu.completion);
}
/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the counts of both indexes to drain to zero.  To avoid possible
 * starvation of synchronize_srcu(), it first waits for the count of
 * index = ((->completed & 1) ^ 1) to drain to zero, then flips ->completed
 * and waits for the count of the other index to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
        __synchronize_srcu(sp, (rcu_gp_is_expedited() && !rcu_gp_is_normal())
                           ? SYNCHRONIZE_SRCU_EXP_TRYCOUNT
                           : SYNCHRONIZE_SRCU_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
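
/*
 * Illustrative sketch (not part of the original file): the classic
 * update-side pattern pairs pointer replacement with synchronize_srcu()
 * before reclaiming the old version.  "gp", "new", "my_srcu", "foo_lock",
 * and "struct foo" are hypothetical:
 *
 *      struct foo *old;
 *
 *      spin_lock(&foo_lock);
 *      old = rcu_dereference_protected(gp, lockdep_is_held(&foo_lock));
 *      rcu_assign_pointer(gp, new);
 *      spin_unlock(&foo_lock);
 *
 *      synchronize_srcu(&my_srcu);     // wait for pre-existing readers
 *      kfree(old);                     // no reader can still hold "old"
 */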
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
        __synchronize_srcu(sp, SYNCHRONIZE_SRCU_EXP_TRYCOUNT);
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
        synchronize_srcu(sp);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
        return sp->completed;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
#define SRCU_CALLBACK_BATCH     10
#define SRCU_INTERVAL           1

/*
 * Move any new SRCU callbacks to the first stage of the SRCU grace
 * period pipeline.
 */
static void srcu_collect_new(struct srcu_struct *sp)
{
        if (!rcu_batch_empty(&sp->batch_queue)) {
                spin_lock_irq(&sp->queue_lock);
                rcu_batch_move(&sp->batch_check0, &sp->batch_queue);
                spin_unlock_irq(&sp->queue_lock);
        }
}
/*
 * Core SRCU state machine.  Advance callbacks from ->batch_check0 to
 * ->batch_check1 and then to ->batch_done as readers drain.
 */
static void srcu_advance_batches(struct srcu_struct *sp, int trycount)
{
        int idx = 1 ^ (sp->completed & 1);

        /*
         * Because readers might be delayed for an extended period after
         * fetching ->completed for their index, at any point in time there
         * might well be readers using both idx=0 and idx=1.  We therefore
         * need to wait for readers to clear from both index values before
         * invoking a callback.
         */
        if (rcu_batch_empty(&sp->batch_check0) &&
            rcu_batch_empty(&sp->batch_check1))
                return; /* no callbacks need to be advanced */

        if (!try_check_zero(sp, idx, trycount))
                return; /* failed to advance, will try after SRCU_INTERVAL */

        /*
         * The callbacks in ->batch_check1 already completed their first
         * zero check and the flip back when they were enqueued on
         * ->batch_check0 in a previous invocation of srcu_advance_batches().
         * (Presumably try_check_zero() returned false during that
         * invocation, leaving the callbacks stranded on ->batch_check1.)
         * They are therefore ready to invoke, so move them to ->batch_done.
         */
        rcu_batch_move(&sp->batch_done, &sp->batch_check1);

        if (rcu_batch_empty(&sp->batch_check0))
                return; /* no callbacks need to be advanced */
        srcu_flip(sp);

        /*
         * The callbacks in ->batch_check0 just finished their first
         * zero check and the flip, so move them to ->batch_check1
         * for future checking on the other idx.
         */
        rcu_batch_move(&sp->batch_check1, &sp->batch_check0);

        /*
         * SRCU read-side critical sections are normally short, so check
         * at least twice in quick succession after a flip.
         */
        trycount = trycount < 2 ? 2 : trycount;
        if (!try_check_zero(sp, idx^1, trycount))
                return; /* failed to advance, will try after SRCU_INTERVAL */

        /*
         * The callbacks in ->batch_check1 have now waited for all
         * pre-existing readers using both idx values.  They are therefore
         * ready to invoke, so move them to ->batch_done.
         */
        rcu_batch_move(&sp->batch_done, &sp->batch_check1);
}
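
/*
 * Illustrative summary (not part of the original file) of how a callback
 * moves through the grace-period pipeline as the functions in this file run,
 * assuming both zero checks succeed in a single pass:
 *
 *      call_srcu()             enqueues onto ->batch_queue
 *      srcu_collect_new()      ->batch_queue  -> ->batch_check0
 *      srcu_advance_batches()  wait for idx to drain, srcu_flip(),
 *                              ->batch_check0 -> ->batch_check1,
 *                              wait for the other idx to drain,
 *                              ->batch_check1 -> ->batch_done
 *      srcu_invoke_callbacks() invokes the entries on ->batch_done
 */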
/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.
 */
static void srcu_invoke_callbacks(struct srcu_struct *sp)
{
        int i;
        struct rcu_head *head;

        for (i = 0; i < SRCU_CALLBACK_BATCH; i++) {
                head = rcu_batch_dequeue(&sp->batch_done);
                if (!head)
                        break;
                local_bh_disable();
                head->func(head);
                local_bh_enable();
        }
}
/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp)
{
        bool pending = true;

        if (rcu_batch_empty(&sp->batch_done) &&
            rcu_batch_empty(&sp->batch_check1) &&
            rcu_batch_empty(&sp->batch_check0) &&
            rcu_batch_empty(&sp->batch_queue)) {
                spin_lock_irq(&sp->queue_lock);
                if (rcu_batch_empty(&sp->batch_done) &&
                    rcu_batch_empty(&sp->batch_check1) &&
                    rcu_batch_empty(&sp->batch_check0) &&
                    rcu_batch_empty(&sp->batch_queue)) {
                        sp->running = false;
                        pending = false;
                }
                spin_unlock_irq(&sp->queue_lock);
        }

        if (pending)
                queue_delayed_work(system_power_efficient_wq,
                                   &sp->work, SRCU_INTERVAL);
}
/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
        struct srcu_struct *sp;

        sp = container_of(work, struct srcu_struct, work.work);

        srcu_collect_new(sp);
        srcu_advance_batches(sp, 1);
        srcu_invoke_callbacks(sp);
        srcu_reschedule(sp);
}
EXPORT_SYMBOL_GPL(process_srcu);