/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/ftrace_event.h>
#include <linux/suspend.h>

#include "tree.h"
#include "rcu.h"

MODULE_ALIAS("rcutree");
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, and this will allow
 * the tracing userspace tools to be able to decipher the string
 * address to the matching string.
 */
#ifdef CONFIG_TRACING
# define DEFINE_RCU_TPS(sname) \
static char sname##_varname[] = #sname; \
static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
# define RCU_STATE_NAME(sname) sname##_varname
#else
# define DEFINE_RCU_TPS(sname)
# define RCU_STATE_NAME(sname) __stringify(sname)
#endif

#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
DEFINE_RCU_TPS(sname) \
DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
struct rcu_state sname##_state = { \
	.level = { &sname##_state.node[0] }, \
	.rda = &sname##_data, \
	.call = cr, \
	.fqs_state = RCU_GP_IDLE, \
	.gpnum = 0UL - 300UL, \
	.completed = 0UL - 300UL, \
	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
	.orphan_donetail = &sname##_state.orphan_donelist, \
	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
	.name = RCU_STATE_NAME(sname), \
	.abbr = sabbr, \
}

RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
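
/*
 * For reference, the first invocation above expands (roughly) to a
 * per-CPU rcu_sched_data definition plus:
 *
 *	struct rcu_state rcu_sched_state = {
 *		.level	= { &rcu_sched_state.node[0] },
 *		.rda	= &rcu_sched_data,
 *		.call	= call_rcu_sched,
 *		...
 *		.name	= "rcu_sched",
 *		.abbr	= 's',
 *	};
 *
 * This sketch elides the tracepoint-string plumbing; see DEFINE_RCU_TPS()
 * and RCU_STATE_NAME() above for the exact definitions.
 */
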
static struct rcu_state *rcu_state_p;
LIST_HEAD(rcu_struct_flavors);

/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
static int num_rcu_lvl[] = {	/* Number of rcu_nodes at specified level. */
	NUM_RCU_LVL_0,
	NUM_RCU_LVL_1,
	NUM_RCU_LVL_2,
	NUM_RCU_LVL_3,
	NUM_RCU_LVL_4,
};
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */

/*
 * The rcu_scheduler_active variable transitions from zero to one just
 * before the first task is spawned.  So when this variable is zero, RCU
 * can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_sched() to a simple barrier().  When this variable
 * is one, RCU must actually do all the hard work required to detect real
 * grace periods.  This variable is also used to suppress boot-time false
 * positives from lockdep-RCU error checking.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
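
/*
 * Illustration only (a sketch, not the exact implementation): the
 * early-boot fast path that the flag above enables amounts to
 *
 *	void synchronize_sched(void)
 *	{
 *		if (!rcu_scheduler_active)
 *			return;		(only one task exists, so nothing
 *					 can be inside an RCU read-side
 *					 critical section)
 *		... full grace-period machinery ...
 *	}
 */
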
/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks.  So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one.  We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
module_param(kthread_prio, int, 0644);

/* Delay in jiffies for grace-period initialization delays, debug only. */
#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
module_param(gp_init_delay, int, 0644);
#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
static const int gp_init_delay;
#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
#define PER_RCU_NODE_PERIOD 10	/* Number of grace periods between delays. */

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test.  The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running.  The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress.  The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
}

/*
 * Note a quiescent state.  Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 * The caller must have disabled preemption.
 */
void rcu_sched_qs(void)
{
	if (!__this_cpu_read(rcu_sched_data.passed_quiesce)) {
		trace_rcu_grace_period(TPS("rcu_sched"),
				       __this_cpu_read(rcu_sched_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_sched_data.passed_quiesce, 1);
	}
}

void rcu_bh_qs(void)
{
	if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
		trace_rcu_grace_period(TPS("rcu_bh"),
				       __this_cpu_read(rcu_bh_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
	}
}

static DEFINE_PER_CPU(int, rcu_sched_qs_mask);

static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
	.dynticks = ATOMIC_INIT(1),
#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
	.dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
	.dynticks_idle = ATOMIC_INIT(1),
#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
};
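
/*
 * Convention for ->dynticks used throughout this file: the counter is
 * incremented on every transition into or out of an extended quiescent
 * state, so its value is odd (as initialized above) while the CPU is
 * non-idle from RCU's viewpoint and even while it is idle.  The
 * "& 0x1" checks below all test this low-order parity bit.
 */
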
DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state.  This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle
 * period, which we in turn do by incrementing the ->dynticks counter
 * by two.
 */
static void rcu_momentary_dyntick_idle(void)
{
	unsigned long flags;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp;
	int resched_mask;
	struct rcu_state *rsp;

	local_irq_save(flags);

	/*
	 * Yes, we can lose flag-setting operations.  This is OK, because
	 * the flag will be set again after some delay.
	 */
	resched_mask = raw_cpu_read(rcu_sched_qs_mask);
	raw_cpu_write(rcu_sched_qs_mask, 0);

	/* Find the flavor that needs a quiescent state. */
	for_each_rcu_flavor(rsp) {
		rdp = raw_cpu_ptr(rsp->rda);
		if (!(resched_mask & rsp->flavor_mask))
			continue;
		smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
		if (READ_ONCE(rdp->mynode->completed) !=
		    READ_ONCE(rdp->cond_resched_completed))
			continue;

		/*
		 * Pretend to be momentarily idle for the quiescent state.
		 * This allows the grace-period kthread to record the
		 * quiescent state, with no need for this CPU to do anything
		 * further.
		 */
		rdtp = this_cpu_ptr(&rcu_dynticks);
		smp_mb__before_atomic(); /* Earlier stuff before QS. */
		atomic_add(2, &rdtp->dynticks);  /* QS. */
		smp_mb__after_atomic(); /* Later stuff after QS. */
		break;
	}
	local_irq_restore(flags);
}

/*
 * Note a context switch.  This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled preemption.
 */
void rcu_note_context_switch(void)
{
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_sched_qs();
	rcu_preempt_note_context_switch();
	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
		rcu_momentary_dyntick_idle();
	trace_rcu_utilization(TPS("End context switch"));
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Register a quiescent state for all RCU flavors.  If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs (but only for those
 * RCU flavors in desperate need of a quiescent state, which will normally
 * be none of them).  Either way, do a lightweight quiescent state for
 * all RCU flavors.
 */
void rcu_all_qs(void)
{
	if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
		rcu_momentary_dyntick_idle();
	this_cpu_inc(rcu_qs_ctr);
}
EXPORT_SYMBOL_GPL(rcu_all_qs);
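
/*
 * Typical caller, for illustration: long-running in-kernel loops report
 * quiescent states via cond_resched_rcu_qs(), which falls back on
 * rcu_all_qs() when cond_resched() has nothing to do, roughly:
 *
 *	#define cond_resched_rcu_qs() \
 *	do { \
 *		if (!cond_resched()) \
 *			rcu_all_qs(); \
 *	} while (0)
 */
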
static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
static long qhimark = 10000;	/* If this many pending, ignore blimit. */
static long qlowmark = 100;	/* Once only this many pending, use blimit. */

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;

module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = HZ / 20;
module_param(jiffies_till_sched_qs, ulong, 0644);

static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
				  struct rcu_data *rdp);
static void force_qs_rnp(struct rcu_state *rsp,
			 int (*f)(struct rcu_data *rsp, bool *isidle,
				  unsigned long *maxj),
			 bool *isidle, unsigned long *maxj);
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);

/*
 * Return the number of RCU batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started(void)
{
	return rcu_state_p->gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started);

/*
 * Return the number of RCU-sched batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_sched(void)
{
	return rcu_sched_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_sched);

/*
 * Return the number of RCU BH batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_bh(void)
{
	return rcu_bh_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_bh);

/*
 * Return the number of RCU batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed(void)
{
	return rcu_state_p->completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU-sched batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_bh(void)
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Force a quiescent state.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(rcu_state_p);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		pr_info("%s: wait state: %d ->state: %#lx\n",
			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
		/* sched_show_task(rsp->gp_kthread); */
	}
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated.  This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded.  In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void)
{
	rcutorture_testseq++;
	rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed)
{
	struct rcu_state *rsp = NULL;

	switch (test_type) {
	case RCU_FLAVOR:
		rsp = rcu_state_p;
		break;
	case RCU_BH_FLAVOR:
		rsp = &rcu_bh_state;
		break;
	case RCU_SCHED_FLAVOR:
		rsp = &rcu_sched_state;
		break;
	default:
		break;
	}
	if (rsp != NULL) {
		*flags = READ_ONCE(rsp->gp_flags);
		*gpnum = READ_ONCE(rsp->gpnum);
		*completed = READ_ONCE(rsp->completed);
		return;
	}
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum)
{
	rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);

/*
 * Does the CPU have callbacks ready to be invoked?
 */
static int
cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
{
	return &rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL] &&
	       rdp->nxttail[RCU_DONE_TAIL] != NULL;
}
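
/*
 * A note on the ->nxttail[] tests above and below: each CPU's callbacks
 * live on the single linked list ->nxtlist, which is partitioned into
 * segments by the tail pointers RCU_DONE_TAIL (callbacks whose grace
 * period has completed and that may now be invoked), RCU_WAIT_TAIL
 * (waiting for the current grace period), RCU_NEXT_READY_TAIL (waiting
 * for the next grace period), and RCU_NEXT_TAIL (not yet associated
 * with any grace period).
 */
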
/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}

/*
 * Is there any need for future grace periods?
 * Interrupts must be disabled.  If the caller does not hold the root
 * rcu_node structure's ->lock, the results are advisory only.
 */
static int rcu_future_needs_gp(struct rcu_state *rsp)
{
	struct rcu_node *rnp = rcu_get_root(rsp);
	int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
	int *fp = &rnp->need_future_gp[idx];

	return READ_ONCE(*fp);
}
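
/*
 * Why "(->completed + 1) & 0x1": ->need_future_gp[] is a two-element
 * array indexed by the low-order bit of the grace-period number, so
 * requests for the grace period after the current one land in the slot
 * for ->completed + 1.  For example, if ->completed is 4, a future-GP
 * request is recorded in (and read back from) slot 1.
 */
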
/*
 * Does the current CPU require a not-yet-started grace period?
 * The caller must have disabled interrupts to prevent races with
 * normal callback registry.
 */
static int
cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
{
	int i;

	if (rcu_gp_in_progress(rsp))
		return 0;  /* No, a grace period is already in progress. */
	if (rcu_future_needs_gp(rsp))
		return 1;  /* Yes, a no-CBs CPU needs one. */
	if (!rdp->nxttail[RCU_NEXT_TAIL])
		return 0;  /* No, this is a no-CBs (or offline) CPU. */
	if (*rdp->nxttail[RCU_NEXT_READY_TAIL])
		return 1;  /* Yes, this CPU has newly registered callbacks. */
	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
		if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
		    ULONG_CMP_LT(READ_ONCE(rsp->completed),
				 rdp->nxtcompleted[i]))
			return 1;  /* Yes, CBs for future grace period. */
	return 0; /* No grace period needed. */
}

/*
 * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state
 *
 * If the new value of the ->dynticks_nesting counter now is zero,
 * we really have entered idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_eqs_enter_common(long long oldval, bool user)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
	if (!user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on entry: not idle task"), oldval, 0);
		ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		do_nocb_deferred_wakeup(rdp);
	}
	rcu_prepare_for_idle();
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic();  /* Force ordering with next sojourn. */
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
	rcu_dynticks_task_enter();

	/*
	 * It is illegal to enter an extended quiescent state while
	 * in an RCU read-side critical section.
	 */
	rcu_lockdep_assert(!lock_is_held(&rcu_lock_map),
			   "Illegal idle entry in RCU read-side critical section.");
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map),
			   "Illegal idle entry in RCU-bh read-side critical section.");
	rcu_lockdep_assert(!lock_is_held(&rcu_sched_lock_map),
			   "Illegal idle entry in RCU-sched read-side critical section.");
}

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_enter(bool user)
{
	long long oldval;
	struct rcu_dynticks *rdtp;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
	if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
		rdtp->dynticks_nesting = 0;
		rcu_eqs_enter_common(oldval, user);
	} else {
		rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE;
	}
}
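
/*
 * The arithmetic above keeps a count of process-level reasons to stay
 * non-idle in the upper bits of ->dynticks_nesting: each nested entry
 * subtracts DYNTICK_TASK_NEST_VALUE, and only when exactly one
 * DYNTICK_TASK_NEST_VALUE remains does the CPU really enter the
 * extended quiescent state.  The low-order bits remain available for
 * counting irq nesting, as in rcu_irq_exit() below.
 */
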
/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur.  (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * We crowbar the ->dynticks_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
void rcu_idle_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_enter(false);
	rcu_sysidle_enter(0);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_enter);

#ifdef CONFIG_RCU_USER_QS
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace.  No use of RCU
 * is permitted between this call and rcu_user_exit().  This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 */
void rcu_user_enter(void)
{
	rcu_eqs_enter(1);
}
#endif /* CONFIG_RCU_USER_QS */

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit().  If your
 * architecture violates this assumption, RCU will give you what you
 * deserve, good and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_exit(void)
{
	unsigned long flags;
	long long oldval;
	struct rcu_dynticks *rdtp;

	local_irq_save(flags);
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting--;
	WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
	if (rdtp->dynticks_nesting)
		trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
	else
		rcu_eqs_enter_common(oldval, true);
	rcu_sysidle_enter(1);
	local_irq_restore(flags);
}

/*
 * rcu_eqs_exit_common - current CPU moving away from extended quiescent state
 *
 * If the new value of the ->dynticks_nesting counter was previously zero,
 * we really have exited idle, and must do the appropriate accounting.
 * The caller must have disabled interrupts.
 */
static void rcu_eqs_exit_common(long long oldval, int user)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	rcu_dynticks_task_exit();
	smp_mb__before_atomic();  /* Force ordering w/previous sojourn. */
	atomic_inc(&rdtp->dynticks);
	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
	smp_mb__after_atomic();  /* See above. */
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
	if (!user && !is_idle_task(current)) {
		struct task_struct *idle __maybe_unused =
			idle_task(smp_processor_id());

		trace_rcu_dyntick(TPS("Error on exit: not idle task"),
				  oldval, rdtp->dynticks_nesting);
		ftrace_dump(DUMP_ORIG);
		WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s",
			  current->pid, current->comm,
			  idle->pid, idle->comm); /* must be idle task! */
	}
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 */
static void rcu_eqs_exit(bool user)
{
	struct rcu_dynticks *rdtp;
	long long oldval;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(oldval < 0);
	if (oldval & DYNTICK_TASK_NEST_MASK) {
		rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
	} else {
		rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
		rcu_eqs_exit_common(oldval, user);
	}
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * We crowbar the ->dynticks_nesting field to DYNTICK_TASK_NEST to
 * allow for the possibility of usermode upcalls messing up our count
 * of interrupt nesting level during the busy period that is just
 * now starting.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	rcu_sysidle_exit(0);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(rcu_idle_exit);

#ifdef CONFIG_RCU_USER_QS
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run a RCU read side critical section anytime.
 */
void rcu_user_exit(void)
{
	rcu_eqs_exit(1);
}
#endif /* CONFIG_RCU_USER_QS */

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to
 * user mode!  This code assumes that the idle loop never does upcalls to
 * user mode.  If your architecture does do upcalls from the idle loop (or
 * does anything else that results in unbalanced calls to the irq_enter()
 * and irq_exit() functions), RCU will give you what you deserve, good
 * and hard.  But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 */
void rcu_irq_enter(void)
{
	unsigned long flags;
	struct rcu_dynticks *rdtp;
	long long oldval;

	local_irq_save(flags);
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	rdtp->dynticks_nesting++;
	WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
	if (oldval)
		trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
	else
		rcu_eqs_exit_common(oldval, true);
	rcu_sysidle_exit(1);
	local_irq_restore(flags);
}

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active.  This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int.  (You will probably
 * run out of stack space first.)
 */
void rcu_nmi_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int incby = 2;

	/* Complain about underflow. */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two.  This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
		smp_mb__before_atomic();  /* Force delay from prior write. */
		atomic_inc(&rdtp->dynticks);
		/* atomic_inc() before later RCU read-side crit sects */
		smp_mb__after_atomic();  /* See above. */
		WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
		incby = 1;
	}
	rdtp->dynticks_nmi_nesting += incby;
	barrier();
}
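
/*
 * Worked example of the bookkeeping above: an NMI arriving on an
 * RCU-idle CPU takes ->dynticks_nmi_nesting from 0 to 1 (incby == 1,
 * ->dynticks made odd); a second NMI nesting inside it takes the count
 * from 1 to 3 (incby == 2).  rcu_nmi_exit() then sees 3 and subtracts
 * two, and finally sees exactly 1, which is its cue to restore the
 * RCU-idle state.
 */
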
/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdtp->dynticks_nmi_nesting != 1) {
		rdtp->dynticks_nmi_nesting -= 2;
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	rdtp->dynticks_nmi_nesting = 0;
	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
	smp_mb__before_atomic();  /* See above. */
	atomic_inc(&rdtp->dynticks);
	smp_mb__after_atomic();  /* Force delay to next write. */
	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
}

/**
 * __rcu_is_watching - are RCU read-side critical sections safe?
 *
 * Return true if RCU is watching the running CPU, which means that
 * this CPU can safely enter RCU read-side critical sections.  Unlike
 * rcu_is_watching(), the caller of __rcu_is_watching() must have at
 * least disabled preemption.
 */
bool notrace __rcu_is_watching(void)
{
	return atomic_read(this_cpu_ptr(&rcu_dynticks.dynticks)) & 0x1;
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the current CPU, that is, false only
 * when the CPU is in its idle loop and not within an interrupt or NMI
 * handler.
 */
bool notrace rcu_is_watching(void)
{
	bool ret;

	preempt_disable();
	ret = __rcu_is_watching();
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);
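
/*
 * Typical usage, for illustration only: debug checks in code that might
 * run on an RCU-idle CPU guard their RCU usage with something like
 *
 *	WARN_ON_ONCE(!rcu_is_watching());
 *	rcu_read_lock();
 *	...
 *	rcu_read_unlock();
 */
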
#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online?  Disable preemption to avoid false positives
 * that could otherwise happen due to the current CPU number being sampled,
 * this task being preempted, its old CPU being taken offline, resuming
 * on some other CPU, then determining that its old CPU is now offline.
 * It is OK to use RCU on an offline processor during initial boot, hence
 * the check for rcu_scheduler_fully_active.  Note also that it is OK
 * for a CPU coming online to use RCU for one jiffy prior to marking itself
 * online in the cpu_online_mask.  Similarly, it is OK for a CPU going
 * offline to continue to use RCU for one jiffy after marking itself
 * offline in the cpu_online_mask.  This leniency is necessary given the
 * non-atomic nature of the online and offline processing, for example,
 * the fact that a CPU enters the scheduler after completing the CPU_DYING
 * notifiers.
 *
 * This is also why RCU internally marks CPUs online during the
 * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
 *
 * Disable checking if in an NMI handler because we cannot safely report
 * errors from NMI handlers anyway.
 */
bool rcu_lockdep_current_cpu_online(void)
{
	struct rcu_data *rdp;
	struct rcu_node *rnp;
	bool ret;

	if (in_nmi())
		return true;
	preempt_disable();
	rdp = this_cpu_ptr(&rcu_sched_data);
	rnp = rdp->mynode;
	ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
	      !rcu_scheduler_fully_active;
	preempt_enable();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);

#endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */

/**
 * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
 *
 * If the current CPU is idle or running at a first-level (not nested)
 * interrupt from idle, return true.  The caller must have at least
 * disabled preemption.
 */
static int rcu_is_cpu_rrupt_from_idle(void)
{
	return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 1;
}

/*
 * Snapshot the specified CPU's dynticks counter so that we can later
 * credit them with an implicit quiescent state.  Return 1 if this CPU
 * is in dynticks idle mode, which is an extended quiescent state.
 */
static int dyntick_save_progress_counter(struct rcu_data *rdp,
					 bool *isidle, unsigned long *maxj)
{
	rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
	rcu_sysidle_check_cpu(rdp, isidle, maxj);
	if ((rdp->dynticks_snap & 0x1) == 0) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		return 1;
	} else {
		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
				 rdp->mynode->gpnum))
			WRITE_ONCE(rdp->gpwrap, true);
		return 0;
	}
}

/*
 * Return true if the specified CPU has passed through a quiescent
 * state by virtue of being in or having passed through a dynticks-idle
 * state since the last call to dyntick_save_progress_counter()
 * for this same CPU, or by virtue of having been offline.
 */
  933. static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
  934. bool *isidle, unsigned long *maxj)
  935. {
  936. unsigned int curr;
  937. int *rcrmp;
  938. unsigned int snap;
  939. curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
  940. snap = (unsigned int)rdp->dynticks_snap;
  941. /*
  942. * If the CPU passed through or entered a dynticks idle phase with
  943. * no active irq/NMI handlers, then we can safely pretend that the CPU
  944. * already acknowledged the request to pass through a quiescent
  945. * state. Either way, that CPU cannot possibly be in an RCU
  946. * read-side critical section that started before the beginning
  947. * of the current RCU grace period.
  948. */
	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
		rdp->dynticks_fqs++;
		return 1;
	}

	/*
	 * Check for the CPU being offline, but only if the grace period
	 * is old enough. We don't need to worry about the CPU changing
	 * state: If we see it offline even once, it has been through a
	 * quiescent state.
	 *
	 * The reason for insisting that the grace period be at least
	 * one jiffy old is that CPUs that are not quite online and that
	 * have just gone offline can still execute RCU read-side critical
	 * sections.
	 */
	if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies))
		return 0; /* Grace period is not old enough. */
	barrier();
	if (cpu_is_offline(rdp->cpu)) {
		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
		rdp->offline_fqs++;
		return 1;
	}

	/*
	 * A CPU running for an extended time within the kernel can
	 * delay RCU grace periods. When the CPU is in NO_HZ_FULL mode,
	 * even context-switching back and forth between a pair of
	 * in-kernel CPU-bound tasks cannot advance grace periods.
	 * So if the grace period is old enough, make the CPU pay attention.
	 * Note that the unsynchronized assignments to the per-CPU
	 * rcu_sched_qs_mask variable are safe. Yes, setting of
	 * bits can be lost, but they will be set again on the next
	 * force-quiescent-state pass. So lost bit sets do not result
	 * in incorrect behavior, merely in a grace period lasting
	 * a few jiffies longer than it might otherwise. Because
	 * there are at most four threads involved, and because the
	 * updates are only once every few jiffies, the probability of
	 * lossage (and thus of slight grace-period extension) is
	 * quite low.
	 *
	 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
	 * is set too high, we override with half of the RCU CPU stall
	 * warning delay.
	 */
	rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
	if (ULONG_CMP_GE(jiffies,
			 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
	    ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
		if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
			WRITE_ONCE(rdp->cond_resched_completed,
				   READ_ONCE(rdp->mynode->completed));
			smp_mb(); /* ->cond_resched_completed before *rcrmp. */
			WRITE_ONCE(*rcrmp,
				   READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
			resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
			rdp->rsp->jiffies_resched += 5; /* Enable beating. */
		} else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
			/* Time to beat on that CPU again! */
			resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
			rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
		}
	}

	return 0;
}

static void record_gp_stall_check_time(struct rcu_state *rsp)
{
	unsigned long j = jiffies;
	unsigned long j1;

	rsp->gp_start = j;
	smp_wmb(); /* Record start time before stall time. */
	j1 = rcu_jiffies_till_stall_check();
	WRITE_ONCE(rsp->jiffies_stall, j + j1);
	rsp->jiffies_resched = j + j1 / 2;
	rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
}
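
/*
 * Worked example (assuming the default 21-second stall timeout): with
 * HZ=1000, rcu_jiffies_till_stall_check() returns roughly 21000, so a
 * grace period starting at jiffies=J schedules the first stall warning
 * for J+21000 and begins forcing reluctant CPUs into the scheduler at
 * J+10500 via ->jiffies_resched.
 */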
/*
 * Complain about starvation of grace-period kthread.
 */
static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
{
	unsigned long gpa;
	unsigned long j;

	j = jiffies;
	gpa = READ_ONCE(rsp->gp_activity);
	if (j - gpa > 2 * HZ)
		pr_err("%s kthread starved for %ld jiffies!\n",
		       rsp->name, j - gpa);
}

/*
 * Dump stacks of all tasks running on stalled CPUs.
 */
static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		if (rnp->qsmask != 0) {
			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
				if (rnp->qsmask & (1UL << cpu))
					dump_cpu_task(rnp->grplo + cpu);
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
}

static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
{
	int cpu;
	long delta;
	unsigned long flags;
	unsigned long gpa;
	unsigned long j;
	int ndetected = 0;
	struct rcu_node *rnp = rcu_get_root(rsp);
	long totqlen = 0;

	/* Only let one CPU complain about others per time interval. */
	raw_spin_lock_irqsave(&rnp->lock, flags);
	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	WRITE_ONCE(rsp->jiffies_stall,
		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	/*
	 * OK, time to rat on our buddy...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s detected stalls on CPUs/tasks:",
	       rsp->name);
	print_cpu_stall_info_begin();
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irqsave(&rnp->lock, flags);
		ndetected += rcu_print_task_stall(rnp);
		if (rnp->qsmask != 0) {
			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
				if (rnp->qsmask & (1UL << cpu)) {
					print_cpu_stall_info(rsp,
							     rnp->grplo + cpu);
					ndetected++;
				}
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
	pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
		smp_processor_id(), (long)(jiffies - rsp->gp_start),
		(long)rsp->gpnum, (long)rsp->completed, totqlen);
	if (ndetected) {
		rcu_dump_cpu_stacks(rsp);
	} else {
		if (READ_ONCE(rsp->gpnum) != gpnum ||
		    READ_ONCE(rsp->completed) == gpnum) {
			pr_err("INFO: Stall ended before state dump start\n");
		} else {
			j = jiffies;
			gpa = READ_ONCE(rsp->gp_activity);
			pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
			       rsp->name, j - gpa, j, gpa,
			       jiffies_till_next_fqs,
			       rcu_get_root(rsp)->qsmask);
			/* In this case, the current CPU might be at fault. */
			sched_show_task(current);
		}
	}

	/* Complain about tasks blocking the grace period. */
	rcu_print_detail_task_stall(rsp);

	rcu_check_gp_kthread_starvation(rsp);

	force_quiescent_state(rsp); /* Kick them all. */
}

static void print_cpu_stall(struct rcu_state *rsp)
{
	int cpu;
	unsigned long flags;
	struct rcu_node *rnp = rcu_get_root(rsp);
	long totqlen = 0;

	/*
	 * OK, time to rat on ourselves...
	 * See Documentation/RCU/stallwarn.txt for info on how to debug
	 * RCU CPU stall warnings.
	 */
	pr_err("INFO: %s self-detected stall on CPU", rsp->name);
	print_cpu_stall_info_begin();
	print_cpu_stall_info(rsp, smp_processor_id());
	print_cpu_stall_info_end();
	for_each_possible_cpu(cpu)
		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
	pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
		jiffies - rsp->gp_start,
		(long)rsp->gpnum, (long)rsp->completed, totqlen);

	rcu_check_gp_kthread_starvation(rsp);

	rcu_dump_cpu_stacks(rsp);

	raw_spin_lock_irqsave(&rnp->lock, flags);
	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
		WRITE_ONCE(rsp->jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);

	/*
	 * Attempt to revive the RCU machinery by forcing a context switch.
	 *
	 * A context switch would normally allow the RCU state machine to make
	 * progress and it could be we're stuck in kernel space without context
	 * switches for an entirely unreasonable amount of time.
	 */
	resched_cpu(smp_processor_id());
}

static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long completed;
	unsigned long gpnum;
	unsigned long gps;
	unsigned long j;
	unsigned long js;
	struct rcu_node *rnp;

	if (rcu_cpu_stall_suppress || !rcu_gp_in_progress(rsp))
		return;
	j = jiffies;

	/*
	 * Lots of memory barriers to reject false positives.
	 *
	 * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
	 * then rsp->gp_start, and finally rsp->completed. These values
	 * are updated in the opposite order with memory barriers (or
	 * equivalent) during grace-period initialization and cleanup.
	 * Now, a false positive can occur if we get a new value of
	 * rsp->gp_start and an old value of rsp->jiffies_stall. But given
	 * the memory barriers, the only way that this can happen is if one
	 * grace period ends and another starts between these two fetches.
	 * Detect this by comparing rsp->completed with the previous fetch
	 * from rsp->gpnum.
	 *
	 * Given this check, comparisons of jiffies, rsp->jiffies_stall,
	 * and rsp->gp_start suffice to forestall false positives.
	 */
	gpnum = READ_ONCE(rsp->gpnum);
	smp_rmb(); /* Pick up ->gpnum first... */
	js = READ_ONCE(rsp->jiffies_stall);
	smp_rmb(); /* ...then ->jiffies_stall before the rest... */
	gps = READ_ONCE(rsp->gp_start);
	smp_rmb(); /* ...and finally ->gp_start before ->completed. */
	completed = READ_ONCE(rsp->completed);
	if (ULONG_CMP_GE(completed, gpnum) ||
	    ULONG_CMP_LT(j, js) ||
	    ULONG_CMP_GE(gps, js))
		return; /* No stall or GP completed since entering function. */
	rnp = rdp->mynode;
	if (rcu_gp_in_progress(rsp) &&
	    (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {

		/* We haven't checked in, so go dump stack. */
		print_cpu_stall(rsp);

	} else if (rcu_gp_in_progress(rsp) &&
		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {

		/* They had a few time units to dump stack, so complain. */
		print_other_cpu_stall(rsp, gpnum);
	}
}

/**
 * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
 *
 * Set the stall-warning timeout way off into the future, thus preventing
 * any RCU CPU stall-warning messages from appearing in the current set of
 * RCU grace periods.
 *
 * The caller must disable hard irqs.
 */
void rcu_cpu_stall_reset(void)
{
	struct rcu_state *rsp;

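	/*
	 * jiffies + ULONG_MAX / 2 is the most distant future time that
	 * the wraparound-safe ULONG_CMP_GE()/ULONG_CMP_LT() comparisons
	 * can still recognize as being in the future, so this effectively
	 * disables stall checking for the current grace periods.
	 */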
	for_each_rcu_flavor(rsp)
		WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
}

/*
 * Initialize the specified rcu_data structure's default callback list
 * to empty. The default callback list is the one that is not used by
 * no-callbacks CPUs.
 */
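/*
 * Informal picture of the segmented callback list: ->nxtlist points to
 * the first callback (or is NULL), and each ->nxttail[i] points to the
 * ->next pointer ending sublist i (RCU_DONE_TAIL, RCU_WAIT_TAIL,
 * RCU_NEXT_READY_TAIL, RCU_NEXT_TAIL). An empty list therefore has all
 * four tail pointers aimed at &->nxtlist, which is exactly the state
 * established below.
 */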
static void init_default_callback_list(struct rcu_data *rdp)
{
	int i;

	rdp->nxtlist = NULL;
	for (i = 0; i < RCU_NEXT_SIZE; i++)
		rdp->nxttail[i] = &rdp->nxtlist;
}

/*
 * Initialize the specified rcu_data structure's callback list to empty.
 */
static void init_callback_list(struct rcu_data *rdp)
{
	if (init_nocb_callback_list(rdp))
		return;
	init_default_callback_list(rdp);
}

/*
 * Determine the value that ->completed will have at the end of the
 * next subsequent grace period. This is used to tag callbacks so that
 * a CPU can invoke callbacks in a timely fashion even if that CPU has
 * been dyntick-idle for an extended period with callbacks under the
 * influence of RCU_FAST_NO_HZ.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
				       struct rcu_node *rnp)
{
	/*
	 * If RCU is idle, we just wait for the next grace period.
	 * But we can only be sure that RCU is idle if we are looking
	 * at the root rcu_node structure -- otherwise, a new grace
	 * period might have started, but just not yet gotten around
	 * to initializing the current non-root rcu_node structure.
	 */
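	/*
	 * For example, if the root sees ->gpnum == ->completed == 100, no
	 * grace period is in flight, so callbacks can be invoked once
	 * ->completed reaches 101. A non-root rcu_node showing 100/100
	 * might simply not have heard about grace period 101 yet, so the
	 * conservative answer there is 102: one possibly in-flight grace
	 * period plus the full one after it.
	 */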
	if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
		return rnp->completed + 1;

	/*
	 * Otherwise, wait for a possible partial grace period and
	 * then the subsequent full grace period.
	 */
	return rnp->completed + 2;
}

/*
 * Trace-event helper function for rcu_start_future_gp() and
 * rcu_nocb_wait_gp().
 */
static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
				unsigned long c, const char *s)
{
	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
				      rnp->completed, c, rnp->level,
				      rnp->grplo, rnp->grphi, s);
}

/*
 * Start some future grace period, as needed to handle newly arrived
 * callbacks. The required future grace periods are recorded in each
 * rcu_node structure's ->need_future_gp field. Returns true if there
 * is reason to awaken the grace-period kthread.
 *
 * The caller must hold the specified rcu_node structure's ->lock.
 */
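/*
 * Note on the two-element ->need_future_gp[] array: rcu_cbs_completed()
 * can only ever ask for one of two adjacent grace-period numbers, so
 * requests are keyed by the low-order bit of the number ("c & 0x1"
 * below). Slot c&1 is zeroed by rcu_future_gp_cleanup() when grace
 * period c ends, at which point it can be reused for c+2.
 */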
static bool __maybe_unused
rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
		    unsigned long *c_out)
{
	unsigned long c;
	int i;
	bool ret = false;
	struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);

	/*
	 * Pick up grace-period number for new callbacks. If this
	 * grace period is already marked as needed, return to the caller.
	 */
	c = rcu_cbs_completed(rdp->rsp, rnp);
	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
	if (rnp->need_future_gp[c & 0x1]) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
		goto out;
	}

	/*
	 * If either this rcu_node structure or the root rcu_node structure
	 * believe that a grace period is in progress, then we must wait
	 * for the one following, which is in "c". Because our request
	 * will be noticed at the end of the current grace period, we don't
	 * need to explicitly start one. We only do the lockless check
	 * of rnp_root's fields if the current rcu_node structure thinks
	 * there is no grace period in flight, and because we hold rnp->lock,
	 * the only possible change is when rnp_root's two fields are
	 * equal, in which case rnp_root->gpnum might be concurrently
	 * incremented. But that is OK, as it will just result in our
	 * doing some extra useless work.
	 */
	if (rnp->gpnum != rnp->completed ||
	    READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
		rnp->need_future_gp[c & 0x1]++;
		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
		goto out;
	}

	/*
	 * There might be no grace period in progress. If we don't already
	 * hold it, acquire the root rcu_node structure's lock in order to
	 * start one (if needed).
	 */
	if (rnp != rnp_root) {
		raw_spin_lock(&rnp_root->lock);
		smp_mb__after_unlock_lock();
	}

	/*
	 * Get a new grace-period number. If there really is no grace
	 * period in progress, it will be smaller than the one we obtained
	 * earlier. Adjust callbacks as needed. Note that even no-CBs
	 * CPUs have a ->nxtcompleted[] array, so no no-CBs checks needed.
	 */
	c = rcu_cbs_completed(rdp->rsp, rnp_root);
	for (i = RCU_DONE_TAIL; i < RCU_NEXT_TAIL; i++)
		if (ULONG_CMP_LT(c, rdp->nxtcompleted[i]))
			rdp->nxtcompleted[i] = c;

	/*
	 * If the need for the required grace period is already
	 * recorded, trace and leave.
	 */
	if (rnp_root->need_future_gp[c & 0x1]) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
		goto unlock_out;
	}

	/* Record the need for the future grace period. */
	rnp_root->need_future_gp[c & 0x1]++;

	/* If a grace period is not already in progress, start one. */
	if (rnp_root->gpnum != rnp_root->completed) {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
	} else {
		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
		ret = rcu_start_gp_advanced(rdp->rsp, rnp_root, rdp);
	}
unlock_out:
	if (rnp != rnp_root)
		raw_spin_unlock(&rnp_root->lock);
out:
	if (c_out != NULL)
		*c_out = c;
	return ret;
}

/*
 * Clean up any old requests for the just-ended grace period. Also return
 * whether any additional grace periods have been requested. Also invoke
 * rcu_nocb_gp_cleanup() in order to wake up any no-callbacks kthreads
 * waiting for this grace period to complete.
 */
static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
{
	int c = rnp->completed;
	int needmore;
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);

	rcu_nocb_gp_cleanup(rsp, rnp);
	rnp->need_future_gp[c & 0x1] = 0;
	needmore = rnp->need_future_gp[(c + 1) & 0x1];
	trace_rcu_future_gp(rnp, rdp, c,
			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
	return needmore;
}

/*
 * Awaken the grace-period kthread for the specified flavor of RCU.
 * Don't do a self-awaken, and don't bother awakening when there is
 * nothing for the grace-period kthread to do (as in several CPUs
 * raced to awaken, and we lost), and finally don't try to awaken
 * a kthread that has not yet been created.
 */
static void rcu_gp_kthread_wake(struct rcu_state *rsp)
{
	if (current == rsp->gp_kthread ||
	    !READ_ONCE(rsp->gp_flags) ||
	    !rsp->gp_kthread)
		return;
	wake_up(&rsp->gp_wq);
}

/*
 * If there is room, assign a ->completed number to any callbacks on
 * this CPU that have not already been assigned. Also accelerate any
 * callbacks that were previously assigned a ->completed number that has
 * since proven to be too conservative, which can happen if callbacks get
 * assigned a ->completed number while RCU is idle, but with reference to
 * a non-root rcu_node structure. This function is idempotent, so it does
 * not hurt to call it repeatedly. Returns a flag saying that we should
 * awaken the RCU grace-period kthread.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
			       struct rcu_data *rdp)
{
	unsigned long c;
	int i;
	bool ret;

	/* If the CPU has no callbacks, nothing to do. */
	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
		return false;

	/*
	 * Starting from the sublist containing the callbacks most
	 * recently assigned a ->completed number and working down, find the
	 * first sublist that is not assignable to an upcoming grace period.
	 * Such a sublist has something in it (first two tests) and has
	 * a ->completed number assigned that will complete sooner than
	 * the ->completed number for newly arrived callbacks (last test).
	 *
	 * The key point is that any later sublist can be assigned the
	 * same ->completed number as the newly arrived callbacks, which
	 * means that the callbacks in any of these later sublists can be
	 * grouped into a single sublist, whether or not they have already
	 * been assigned a ->completed number.
	 */
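	/*
	 * Informally: the scan below works down from the NEXT_READY
	 * sublist looking for the first non-empty sublist whose assigned
	 * ->completed number is earlier than "c". Every sublist after
	 * that one may be merged into a single sublist tagged "c",
	 * because making a callback wait for extra grace periods is
	 * always safe -- only invoking it too early is a bug.
	 */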
	c = rcu_cbs_completed(rsp, rnp);
	for (i = RCU_NEXT_TAIL - 1; i > RCU_DONE_TAIL; i--)
		if (rdp->nxttail[i] != rdp->nxttail[i - 1] &&
		    !ULONG_CMP_GE(rdp->nxtcompleted[i], c))
			break;

	/*
	 * If there is no sublist for unassigned callbacks, leave.
	 * At the same time, advance "i" one sublist, so that "i" will
	 * index into the sublist where all the remaining callbacks should
	 * be grouped into.
	 */
	if (++i >= RCU_NEXT_TAIL)
		return false;

	/*
	 * Assign all subsequent callbacks' ->completed number to the next
	 * full grace period and group them all in the sublist initially
	 * indexed by "i".
	 */
	for (; i <= RCU_NEXT_TAIL; i++) {
		rdp->nxttail[i] = rdp->nxttail[RCU_NEXT_TAIL];
		rdp->nxtcompleted[i] = c;
	}
	/* Record any needed additional grace periods. */
	ret = rcu_start_future_gp(rnp, rdp, NULL);

	/* Trace depending on how much we were able to accelerate. */
	if (!*rdp->nxttail[RCU_WAIT_TAIL])
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
	else
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
	return ret;
}

/*
 * Move any callbacks whose grace period has completed to the
 * RCU_DONE_TAIL sublist, then compact the remaining sublists and
 * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
 * sublist. This function is idempotent, so it does not hurt to
 * invoke it repeatedly. As long as it is not invoked -too- often...
 * Returns true if the RCU grace-period kthread needs to be awakened.
 *
 * The caller must hold rnp->lock with interrupts disabled.
 */
static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
			    struct rcu_data *rdp)
{
	int i, j;

	/* If the CPU has no callbacks, nothing to do. */
	if (!rdp->nxttail[RCU_NEXT_TAIL] || !*rdp->nxttail[RCU_DONE_TAIL])
		return false;

	/*
	 * Find all callbacks whose ->completed numbers indicate that they
	 * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
	 */
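	/*
	 * For example, if rnp->completed has advanced to 101 and the WAIT
	 * sublist was tagged 101, those callbacks have now waited out a
	 * full grace period and are moved to RCU_DONE_TAIL below, where
	 * rcu_do_batch() will eventually invoke them.
	 */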
	for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++) {
		if (ULONG_CMP_LT(rnp->completed, rdp->nxtcompleted[i]))
			break;
		rdp->nxttail[RCU_DONE_TAIL] = rdp->nxttail[i];
	}
	/* Clean up any sublist tail pointers that were misordered above. */
	for (j = RCU_WAIT_TAIL; j < i; j++)
		rdp->nxttail[j] = rdp->nxttail[RCU_DONE_TAIL];

	/* Copy down callbacks to fill in empty sublists. */
	for (j = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++, j++) {
		if (rdp->nxttail[j] == rdp->nxttail[RCU_NEXT_TAIL])
			break;
		rdp->nxttail[j] = rdp->nxttail[i];
		rdp->nxtcompleted[j] = rdp->nxtcompleted[i];
	}

	/* Classify any remaining callbacks. */
	return rcu_accelerate_cbs(rsp, rnp, rdp);
}

/*
 * Update CPU-local rcu_data state to record the beginnings and ends of
 * grace periods. The caller must hold the ->lock of the leaf rcu_node
 * structure corresponding to the current CPU, and must have irqs disabled.
 * Returns true if the grace-period kthread needs to be awakened.
 */
static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
			      struct rcu_data *rdp)
{
	bool ret;

	/* Handle the ends of any preceding grace periods first. */
	if (rdp->completed == rnp->completed &&
	    !unlikely(READ_ONCE(rdp->gpwrap))) {

		/* No grace period end, so just accelerate recent callbacks. */
		ret = rcu_accelerate_cbs(rsp, rnp, rdp);

	} else {

		/* Advance callbacks. */
		ret = rcu_advance_cbs(rsp, rnp, rdp);

		/* Remember that we saw this grace-period completion. */
		rdp->completed = rnp->completed;
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
	}

	if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
		/*
		 * If the current grace period is waiting for this CPU,
		 * set up to detect a quiescent state, otherwise don't
		 * go looking for one.
		 */
		rdp->gpnum = rnp->gpnum;
		trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
		rdp->passed_quiesce = 0;
		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
		rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
		zero_cpu_stall_ticks(rdp);
		WRITE_ONCE(rdp->gpwrap, false);
	}
	return ret;
}
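
/*
 * Design note: note_gp_changes() is called from hot paths with irqs
 * disabled, so it first performs a lockless check and then only
 * trylocks the leaf rcu_node structure. If the lock is contended,
 * it simply returns; a later invocation will pick up the change, so
 * nothing is lost beyond a small delay.
 */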
static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	bool needwake;
	struct rcu_node *rnp;

	local_irq_save(flags);
	rnp = rdp->mynode;
	if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
	     rdp->completed == READ_ONCE(rnp->completed) &&
	     !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
	    !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
		local_irq_restore(flags);
		return;
	}
	smp_mb__after_unlock_lock();
	needwake = __note_gp_changes(rsp, rnp, rdp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
	if (needwake)
		rcu_gp_kthread_wake(rsp);
}

/*
 * Initialize a new grace period. Return 0 if no grace period required.
 */
static int rcu_gp_init(struct rcu_state *rsp)
{
	unsigned long oldmask;
	struct rcu_data *rdp;
	struct rcu_node *rnp = rcu_get_root(rsp);

	WRITE_ONCE(rsp->gp_activity, jiffies);
	raw_spin_lock_irq(&rnp->lock);
	smp_mb__after_unlock_lock();
	if (!READ_ONCE(rsp->gp_flags)) {
		/* Spurious wakeup, tell caller to go back to sleep. */
		raw_spin_unlock_irq(&rnp->lock);
		return 0;
	}
	WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */

	if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
		/*
		 * Grace period already in progress, don't start another.
		 * Not supposed to be able to happen.
		 */
		raw_spin_unlock_irq(&rnp->lock);
		return 0;
	}

	/* Advance to a new grace period and initialize state. */
	record_gp_stall_check_time(rsp);
	/* Record GP times before starting GP, hence smp_store_release(). */
	smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
	trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
	raw_spin_unlock_irq(&rnp->lock);

	/*
	 * Apply per-leaf buffered online and offline operations to the
	 * rcu_node tree. Note that this new grace period need not wait
	 * for subsequent online CPUs, and that quiescent-state forcing
	 * will handle subsequent offline CPUs.
	 */
	rcu_for_each_leaf_node(rsp, rnp) {
		raw_spin_lock_irq(&rnp->lock);
		smp_mb__after_unlock_lock();
		if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
		    !rnp->wait_blkd_tasks) {
			/* Nothing to do on this leaf rcu_node structure. */
			raw_spin_unlock_irq(&rnp->lock);
			continue;
		}

		/* Record old state, apply changes to ->qsmaskinit field. */
		oldmask = rnp->qsmaskinit;
		rnp->qsmaskinit = rnp->qsmaskinitnext;

		/* If zero-ness of ->qsmaskinit changed, propagate up tree. */
		if (!oldmask != !rnp->qsmaskinit) {
			if (!oldmask) /* First online CPU for this rcu_node. */
				rcu_init_new_rnp(rnp);
			else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
				rnp->wait_blkd_tasks = true;
			else /* Last offline CPU and can propagate. */
				rcu_cleanup_dead_rnp(rnp);
		}

		/*
		 * If all waited-on tasks from prior grace period are
		 * done, and if all this rcu_node structure's CPUs are
		 * still offline, propagate up the rcu_node tree and
		 * clear ->wait_blkd_tasks. Otherwise, if one of this
		 * rcu_node structure's CPUs has since come back online,
		 * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
		 * checks for this, so just call it unconditionally).
		 */
		if (rnp->wait_blkd_tasks &&
		    (!rcu_preempt_has_tasks(rnp) ||
		     rnp->qsmaskinit)) {
			rnp->wait_blkd_tasks = false;
			rcu_cleanup_dead_rnp(rnp);
		}

		raw_spin_unlock_irq(&rnp->lock);
	}

	/*
	 * Set the quiescent-state-needed bits in all the rcu_node
	 * structures for all currently online CPUs in breadth-first order,
	 * starting from the root rcu_node structure, relying on the layout
	 * of the tree within the rsp->node[] array. Note that other CPUs
	 * will access only the leaves of the hierarchy, thus seeing that no
	 * grace period is in progress, at least until the corresponding
	 * leaf node has been initialized. In addition, we have excluded
	 * CPU-hotplug operations.
	 *
	 * The grace period cannot complete until the initialization
	 * process finishes, because this kthread handles both.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irq(&rnp->lock);
		smp_mb__after_unlock_lock();
		rdp = this_cpu_ptr(rsp->rda);
		rcu_preempt_check_blocked_tasks(rnp);
		rnp->qsmask = rnp->qsmaskinit;
		WRITE_ONCE(rnp->gpnum, rsp->gpnum);
		if (WARN_ON_ONCE(rnp->completed != rsp->completed))
			WRITE_ONCE(rnp->completed, rsp->completed);
		if (rnp == rdp->mynode)
			(void)__note_gp_changes(rsp, rnp, rdp);
		rcu_preempt_boost_start_gp(rnp);
		trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
					    rnp->level, rnp->grplo,
					    rnp->grphi, rnp->qsmask);
		raw_spin_unlock_irq(&rnp->lock);
		cond_resched_rcu_qs();
		WRITE_ONCE(rsp->gp_activity, jiffies);
		if (gp_init_delay > 0 &&
		    !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD)))
			schedule_timeout_uninterruptible(gp_init_delay);
	}

	return 1;
}

/*
 * Do one round of quiescent-state forcing.
 */
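/*
 * Forcing proceeds in two phases: on the first pass through the loop in
 * rcu_gp_kthread(), fqs_state is RCU_SAVE_DYNTICK, so each CPU's
 * ->dynticks counter is snapshotted; on later passes (RCU_FORCE_QS),
 * the snapshots are rechecked so that any CPU that was idle, or that
 * passed through idle, in the meantime can be reported as quiescent on
 * its behalf.
 */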
static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
{
	int fqs_state = fqs_state_in;
	bool isidle = false;
	unsigned long maxj;
	struct rcu_node *rnp = rcu_get_root(rsp);

	WRITE_ONCE(rsp->gp_activity, jiffies);
	rsp->n_force_qs++;
	if (fqs_state == RCU_SAVE_DYNTICK) {
		/* Collect dyntick-idle snapshots. */
		if (is_sysidle_rcu_state(rsp)) {
			isidle = true;
			maxj = jiffies - ULONG_MAX / 4;
		}
		force_qs_rnp(rsp, dyntick_save_progress_counter,
			     &isidle, &maxj);
		rcu_sysidle_report_gp(rsp, isidle, maxj);
		fqs_state = RCU_FORCE_QS;
	} else {
		/* Handle dyntick-idle and offline CPUs. */
		isidle = true;
		force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
	}
	/* Clear flag to prevent immediate re-entry. */
	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
		raw_spin_lock_irq(&rnp->lock);
		smp_mb__after_unlock_lock();
		WRITE_ONCE(rsp->gp_flags,
			   READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
		raw_spin_unlock_irq(&rnp->lock);
	}
	return fqs_state;
}

/*
 * Clean up after the old grace period.
 */
static void rcu_gp_cleanup(struct rcu_state *rsp)
{
	unsigned long gp_duration;
	bool needgp = false;
	int nocb = 0;
	struct rcu_data *rdp;
	struct rcu_node *rnp = rcu_get_root(rsp);

	WRITE_ONCE(rsp->gp_activity, jiffies);
	raw_spin_lock_irq(&rnp->lock);
	smp_mb__after_unlock_lock();
	gp_duration = jiffies - rsp->gp_start;
	if (gp_duration > rsp->gp_max)
		rsp->gp_max = gp_duration;

	/*
	 * We know the grace period is complete, but to everyone else
	 * it appears to still be ongoing. But it is also the case
	 * that to everyone else it looks like there is nothing that
	 * they can do to advance the grace period. It is therefore
	 * safe for us to drop the lock in order to mark the grace
	 * period as completed in all of the rcu_node structures.
	 */
	raw_spin_unlock_irq(&rnp->lock);

	/*
	 * Propagate new ->completed value to rcu_node structures so
	 * that other CPUs don't have to wait until the start of the next
	 * grace period to process their callbacks. This also avoids
	 * some nasty RCU grace-period initialization races by forcing
	 * the end of the current grace period to be completely recorded in
	 * all of the rcu_node structures before the beginning of the next
	 * grace period is recorded in any of the rcu_node structures.
	 */
	rcu_for_each_node_breadth_first(rsp, rnp) {
		raw_spin_lock_irq(&rnp->lock);
		smp_mb__after_unlock_lock();
		WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
		WARN_ON_ONCE(rnp->qsmask);
		WRITE_ONCE(rnp->completed, rsp->gpnum);
		rdp = this_cpu_ptr(rsp->rda);
		if (rnp == rdp->mynode)
			needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
		/* smp_mb() provided by prior unlock-lock pair. */
		nocb += rcu_future_gp_cleanup(rsp, rnp);
		raw_spin_unlock_irq(&rnp->lock);
		cond_resched_rcu_qs();
		WRITE_ONCE(rsp->gp_activity, jiffies);
	}
	rnp = rcu_get_root(rsp);
	raw_spin_lock_irq(&rnp->lock);
	smp_mb__after_unlock_lock(); /* Order GP before ->completed update. */
	rcu_nocb_gp_set(rnp, nocb);

	/* Declare grace period done. */
	WRITE_ONCE(rsp->completed, rsp->gpnum);
	trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
	rsp->fqs_state = RCU_GP_IDLE;
	rdp = this_cpu_ptr(rsp->rda);
	/* Advance CBs to reduce false positives below. */
	needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
	if (needgp || cpu_needs_another_gp(rsp, rdp)) {
		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
		trace_rcu_grace_period(rsp->name,
				       READ_ONCE(rsp->gpnum),
				       TPS("newreq"));
	}
	raw_spin_unlock_irq(&rnp->lock);
}

/*
 * Body of kthread that handles grace periods.
 */
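/*
 * Overview of the state machine below: the kthread sleeps in
 * RCU_GP_WAIT_GPS until RCU_GP_FLAG_INIT is set, runs rcu_gp_init(),
 * then loops in RCU_GP_WAIT_FQS, waking every jiffies_till_first_fqs
 * or jiffies_till_next_fqs ticks (or sooner on RCU_GP_FLAG_FQS) to
 * force quiescent states until the root ->qsmask clears and no readers
 * block the grace period, and finally runs rcu_gp_cleanup().
 */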
static int __noreturn rcu_gp_kthread(void *arg)
{
	int fqs_state;
	int gf;
	unsigned long j;
	int ret;
	struct rcu_state *rsp = arg;
	struct rcu_node *rnp = rcu_get_root(rsp);

	rcu_bind_gp_kthread();
	for (;;) {

		/* Handle grace-period start. */
		for (;;) {
			trace_rcu_grace_period(rsp->name,
					       READ_ONCE(rsp->gpnum),
					       TPS("reqwait"));
			rsp->gp_state = RCU_GP_WAIT_GPS;
			wait_event_interruptible(rsp->gp_wq,
						 READ_ONCE(rsp->gp_flags) &
						 RCU_GP_FLAG_INIT);
			/* Locking provides needed memory barrier. */
			if (rcu_gp_init(rsp))
				break;
			cond_resched_rcu_qs();
			WRITE_ONCE(rsp->gp_activity, jiffies);
			WARN_ON(signal_pending(current));
			trace_rcu_grace_period(rsp->name,
					       READ_ONCE(rsp->gpnum),
					       TPS("reqwaitsig"));
		}

		/* Handle quiescent-state forcing. */
		fqs_state = RCU_SAVE_DYNTICK;
		j = jiffies_till_first_fqs;
		if (j > HZ) {
			j = HZ;
			jiffies_till_first_fqs = HZ;
		}
		ret = 0;
		for (;;) {
			if (!ret)
				rsp->jiffies_force_qs = jiffies + j;
			trace_rcu_grace_period(rsp->name,
					       READ_ONCE(rsp->gpnum),
					       TPS("fqswait"));
			rsp->gp_state = RCU_GP_WAIT_FQS;
			ret = wait_event_interruptible_timeout(rsp->gp_wq,
					((gf = READ_ONCE(rsp->gp_flags)) &
					 RCU_GP_FLAG_FQS) ||
					(!READ_ONCE(rnp->qsmask) &&
					 !rcu_preempt_blocked_readers_cgp(rnp)),
					j);
			/* Locking provides needed memory barriers. */
			/* If grace period done, leave loop. */
			if (!READ_ONCE(rnp->qsmask) &&
			    !rcu_preempt_blocked_readers_cgp(rnp))
				break;
			/* If time for quiescent-state forcing, do it. */
			if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
			    (gf & RCU_GP_FLAG_FQS)) {
				trace_rcu_grace_period(rsp->name,
						       READ_ONCE(rsp->gpnum),
						       TPS("fqsstart"));
				fqs_state = rcu_gp_fqs(rsp, fqs_state);
				trace_rcu_grace_period(rsp->name,
						       READ_ONCE(rsp->gpnum),
						       TPS("fqsend"));
				cond_resched_rcu_qs();
				WRITE_ONCE(rsp->gp_activity, jiffies);
			} else {
				/* Deal with stray signal. */
				cond_resched_rcu_qs();
				WRITE_ONCE(rsp->gp_activity, jiffies);
				WARN_ON(signal_pending(current));
				trace_rcu_grace_period(rsp->name,
						       READ_ONCE(rsp->gpnum),
						       TPS("fqswaitsig"));
			}
			j = jiffies_till_next_fqs;
			if (j > HZ) {
				j = HZ;
				jiffies_till_next_fqs = HZ;
			} else if (j < 1) {
				j = 1;
				jiffies_till_next_fqs = 1;
			}
		}

		/* Handle grace-period end. */
		rcu_gp_cleanup(rsp);
	}
}

/*
 * Start a new RCU grace period if warranted, re-initializing the hierarchy
 * in preparation for detecting the next grace period. The caller must hold
 * the root node's ->lock and hard irqs must be disabled.
 *
 * Note that it is legal for a dying CPU (which is marked as offline) to
 * invoke this function. This can happen when the dying CPU reports its
 * quiescent state.
 *
 * Returns true if the grace-period kthread must be awakened.
 */
static bool
rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
		      struct rcu_data *rdp)
{
	if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
		/*
		 * Either we have not yet spawned the grace-period
		 * task, this CPU does not need another grace period,
		 * or a grace period is already in progress.
		 * Either way, don't start a new grace period.
		 */
		return false;
	}
	WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
	trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
			       TPS("newreq"));

	/*
	 * We can't do wakeups while holding the rnp->lock, as that
	 * could cause possible deadlocks with the rq->lock. Defer
	 * the wakeup to our caller.
	 */
	return true;
}

/*
 * Similar to rcu_start_gp_advanced(), but also advance the calling CPU's
 * callbacks. Note that rcu_start_gp_advanced() cannot do this because it
 * is invoked indirectly from rcu_advance_cbs(), which would result in
 * endless recursion -- or would do so if it weren't for the self-deadlock
 * that is encountered beforehand.
 *
 * Returns true if the grace-period kthread needs to be awakened.
 */
static bool rcu_start_gp(struct rcu_state *rsp)
{
	struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
	struct rcu_node *rnp = rcu_get_root(rsp);
	bool ret = false;

	/*
	 * If there is no grace period in progress right now, any
	 * callbacks we have up to this point will be satisfied by the
	 * next grace period. Also, advancing the callbacks reduces the
	 * probability of false positives from cpu_needs_another_gp()
	 * resulting in pointless grace periods. So, advance callbacks
	 * then start the grace period!
	 */
	ret = rcu_advance_cbs(rsp, rnp, rdp) || ret;
	ret = rcu_start_gp_advanced(rsp, rnp, rdp) || ret;
	return ret;
}

/*
 * Report a full set of quiescent states to the specified rcu_state
 * data structure. This involves cleaning up after the prior grace
 * period and letting rcu_start_gp() start up the next grace period
 * if one is needed. Note that the caller must hold rnp->lock, which
 * is released before return.
 */
static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
	__releases(rcu_get_root(rsp)->lock)
{
	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
	raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
	rcu_gp_kthread_wake(rsp);
}

/*
 * Similar to rcu_report_qs_rdp(), for which it is a helper function.
 * Allows quiescent states for a group of CPUs to be reported at one go
 * to the specified rcu_node structure, though all the CPUs in the group
 * must be represented by the same rcu_node structure (which need not be a
 * leaf rcu_node structure, though it often will be). The gps parameter
 * is the grace-period snapshot, which means that the quiescent states
 * are valid only if rnp->gpnum is equal to gps. That structure's lock
 * must be held upon entry, and it is released before return.
 */
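/*
 * Worked example: on a 16-CPU system with two 8-CPU leaf rcu_node
 * structures under one root, CPU 11's quiescent state clears bit 3 in
 * its leaf's ->qsmask. Once all eight bits in that leaf are clear, the
 * loop below clears the leaf's ->grpmask bit in the root's ->qsmask;
 * when the root's mask also reaches zero, rcu_report_qs_rsp() declares
 * the grace period complete.
 */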
static void
rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
		  struct rcu_node *rnp, unsigned long gps, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long oldmask = 0;
	struct rcu_node *rnp_c;

	/* Walk up the rcu_node hierarchy. */
	for (;;) {
		if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {

			/*
			 * Our bit has already been cleared, or the
			 * relevant grace period is already over, so done.
			 */
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
		rnp->qsmask &= ~mask;
		trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
						 mask, rnp->qsmask, rnp->level,
						 rnp->grplo, rnp->grphi,
						 !!rnp->gp_tasks);
		if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {

			/* Other bits still set at this level, so done. */
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
			return;
		}
		mask = rnp->grpmask;
		if (rnp->parent == NULL) {

			/* No more levels. Exit loop holding root lock. */
			break;
		}
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		rnp_c = rnp;
		rnp = rnp->parent;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		smp_mb__after_unlock_lock();
		oldmask = rnp_c->qsmask;
	}

	/*
	 * Get here if we are the last CPU to pass through a quiescent
	 * state for this grace period. Invoke rcu_report_qs_rsp()
	 * to clean up and start the next grace period if one is needed.
	 */
	rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
}

/*
 * Record a quiescent state for all tasks that were previously queued
 * on the specified rcu_node structure and that were blocking the current
 * RCU grace period. The caller must hold the specified rnp->lock with
 * irqs disabled, and this lock is released upon return, but irqs remain
 * disabled.
 */
static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
				      struct rcu_node *rnp, unsigned long flags)
	__releases(rnp->lock)
{
	unsigned long gps;
	unsigned long mask;
	struct rcu_node *rnp_p;

	if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
	    rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return; /* Still need more quiescent states! */
	}

	rnp_p = rnp->parent;
	if (rnp_p == NULL) {
		/*
		 * Only one rcu_node structure in the tree, so don't
		 * try to report up to its nonexistent parent!
		 */
		rcu_report_qs_rsp(rsp, flags);
		return;
	}

	/* Report up the rest of the hierarchy, tracking current ->gpnum. */
	gps = rnp->gpnum;
	mask = rnp->grpmask;
	raw_spin_unlock(&rnp->lock);	/* irqs remain disabled. */
	raw_spin_lock(&rnp_p->lock);	/* irqs already disabled. */
	smp_mb__after_unlock_lock();
	rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
}

/*
 * Record a quiescent state for the specified CPU to that CPU's rcu_data
 * structure. This must be either called from the specified CPU, or
 * called when the specified CPU is known to be offline (and when it is
 * also known that no other CPU is concurrently trying to help the offline
 * CPU). The ->gpnum snapshot in the rcu_data structure is used to make
 * sure we are still in the grace period of interest. We don't want to
 * end the current grace period based on quiescent states detected in an
 * earlier grace period!
 */
static void
rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
{
	unsigned long flags;
	unsigned long mask;
	bool needwake;
	struct rcu_node *rnp;

	rnp = rdp->mynode;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();
	if ((rdp->passed_quiesce == 0 &&
	     rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) ||
	    rdp->gpnum != rnp->gpnum || rnp->completed == rnp->gpnum ||
	    rdp->gpwrap) {

		/*
		 * The grace period in which this quiescent state was
		 * recorded has ended, so don't report it upwards.
		 * We will instead need a new quiescent state that lies
		 * within the current grace period.
		 */
		rdp->passed_quiesce = 0;	/* need qs for new gp. */
		rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
		return;
	}
	mask = rdp->grpmask;
	if ((rnp->qsmask & mask) == 0) {
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	} else {
		rdp->qs_pending = 0;

		/*
		 * This GP can't end until this CPU checks in, so all of our
		 * callbacks can be processed during the next GP.
		 */
		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);

		rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
		/* ^^^ Released rnp->lock */
		if (needwake)
			rcu_gp_kthread_wake(rsp);
	}
}

/*
 * Check to see if there is a new grace period of which this CPU
 * is not yet aware, and if so, set up local rcu_data state for it.
 * Otherwise, see if this CPU has just passed through its first
 * quiescent state for this grace period, and record that fact if so.
 */
static void
rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
{
	/* Check for grace-period ends and beginnings. */
	note_gp_changes(rsp, rdp);

	/*
	 * Does this CPU still need to do its part for current grace period?
	 * If no, return and let the other CPUs do their part as well.
	 */
	if (!rdp->qs_pending)
		return;

	/*
	 * Was there a quiescent state since the beginning of the grace
	 * period? If no, then exit and wait for the next call.
	 */
	if (!rdp->passed_quiesce &&
	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr))
		return;

	/*
	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
	 * judge of that).
	 */
	rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
}

/*
 * Send the specified CPU's RCU callbacks to the orphanage. The
 * specified CPU must be offline, and the caller must hold the
 * ->orphan_lock.
 */
static void
rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
			  struct rcu_node *rnp, struct rcu_data *rdp)
{
	/* No-CBs CPUs do not have orphanable callbacks. */
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu))
		return;

	/*
	 * Orphan the callbacks. First adjust the counts. This is safe
	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
	 * cannot be running now. Thus no memory barrier is required.
	 */
	if (rdp->nxtlist != NULL) {
		rsp->qlen_lazy += rdp->qlen_lazy;
		rsp->qlen += rdp->qlen;
		rdp->n_cbs_orphaned += rdp->qlen;
		rdp->qlen_lazy = 0;
		WRITE_ONCE(rdp->qlen, 0);
	}

	/*
	 * Next, move those callbacks still needing a grace period to
	 * the orphanage, where some other CPU will pick them up.
	 * Some of the callbacks might have gone partway through a grace
	 * period, but that is too bad. They get to start over because we
	 * cannot assume that grace periods are synchronized across CPUs.
	 * We don't bother updating the ->nxttail[] array yet, instead
	 * we just reset the whole thing later on.
	 */
	if (*rdp->nxttail[RCU_DONE_TAIL] != NULL) {
		*rsp->orphan_nxttail = *rdp->nxttail[RCU_DONE_TAIL];
		rsp->orphan_nxttail = rdp->nxttail[RCU_NEXT_TAIL];
		*rdp->nxttail[RCU_DONE_TAIL] = NULL;
	}

	/*
	 * Then move the ready-to-invoke callbacks to the orphanage,
	 * where some other CPU will pick them up. These will not be
	 * required to pass through another grace period: They are done.
	 */
	if (rdp->nxtlist != NULL) {
		*rsp->orphan_donetail = rdp->nxtlist;
		rsp->orphan_donetail = rdp->nxttail[RCU_DONE_TAIL];
	}

	/*
	 * Finally, initialize the rcu_data structure's list to empty and
	 * disallow further callbacks on this CPU.
	 */
	init_callback_list(rdp);
	rdp->nxttail[RCU_NEXT_TAIL] = NULL;
}

/*
 * Adopt the RCU callbacks from the specified rcu_state structure's
 * orphanage. The caller must hold the ->orphan_lock.
 */
static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
{
	int i;
	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);

	/* No-CBs CPUs are handled specially. */
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
	    rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
		return;

	/* Do the accounting first. */
	rdp->qlen_lazy += rsp->qlen_lazy;
	rdp->qlen += rsp->qlen;
	rdp->n_cbs_adopted += rsp->qlen;
	if (rsp->qlen_lazy != rsp->qlen)
		rcu_idle_count_callbacks_posted();
	rsp->qlen_lazy = 0;
	rsp->qlen = 0;

	/*
	 * We do not need a memory barrier here because the only way we
	 * can get here if there is an rcu_barrier() in flight is if
	 * we are the task doing the rcu_barrier().
	 */

	/* First adopt the ready-to-invoke callbacks. */
	if (rsp->orphan_donelist != NULL) {
		*rsp->orphan_donetail = *rdp->nxttail[RCU_DONE_TAIL];
		*rdp->nxttail[RCU_DONE_TAIL] = rsp->orphan_donelist;
		for (i = RCU_NEXT_SIZE - 1; i >= RCU_DONE_TAIL; i--)
			if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
				rdp->nxttail[i] = rsp->orphan_donetail;
		rsp->orphan_donelist = NULL;
		rsp->orphan_donetail = &rsp->orphan_donelist;
	}

	/* And then adopt the callbacks that still need a grace period. */
	if (rsp->orphan_nxtlist != NULL) {
		*rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxtlist;
		rdp->nxttail[RCU_NEXT_TAIL] = rsp->orphan_nxttail;
		rsp->orphan_nxtlist = NULL;
		rsp->orphan_nxttail = &rsp->orphan_nxtlist;
	}
}

/*
 * Trace the fact that this CPU is going offline.
 */
static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
{
	RCU_TRACE(unsigned long mask);
	RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
	RCU_TRACE(struct rcu_node *rnp = rdp->mynode);

	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return;

	RCU_TRACE(mask = rdp->grpmask);
	trace_rcu_grace_period(rsp->name,
			       rnp->gpnum + 1 - !!(rnp->qsmask & mask),
			       TPS("cpuofl"));
}

/*
 * All CPUs for the specified rcu_node structure have gone offline,
 * and all tasks that were preempted within an RCU read-side critical
 * section while running on one of those CPUs have since exited their RCU
 * read-side critical section. Some other CPU is reporting this fact with
 * the specified rcu_node structure's ->lock held and interrupts disabled.
 * This function therefore goes up the tree of rcu_node structures,
 * clearing the corresponding bits in the ->qsmaskinit fields. Note that
 * the leaf rcu_node structure's ->qsmaskinit field has already been
 * updated.
 *
 * This function does check that the specified rcu_node structure has
 * all CPUs offline and no blocked tasks, so it is OK to invoke it
 * prematurely. That said, invoking it after the fact will cost you
 * a needless lock acquisition. So once it has done its work, don't
 * invoke it again.
 */
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	struct rcu_node *rnp = rnp_leaf;

	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
	    rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
		return;
	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (!rnp)
			break;
		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
		smp_mb__after_unlock_lock(); /* GP memory ordering. */
		rnp->qsmaskinit &= ~mask;
		rnp->qsmask &= ~mask;
		if (rnp->qsmaskinit) {
			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
			return;
		}
		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
	}
}

/*
 * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
 * function. We now remove it from the rcu_node tree's ->qsmaskinit
 * bit masks.
 */
static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return;

	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
	mask = rdp->grpmask;
	raw_spin_lock_irqsave(&rnp->lock, flags);
	smp_mb__after_unlock_lock();	/* Enforce GP memory-order guarantee. */
	rnp->qsmaskinitnext &= ~mask;
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * The CPU has been completely removed, and some other CPU is reporting
 * this fact from process context. Do the remainder of the cleanup,
 * including orphaning the outgoing CPU's RCU callbacks, and also
 * adopting them. There can only be one CPU hotplug operation at a time,
 * so no other CPU can be attempting to update rcu_cpu_kthread_task.
 */
static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */

	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return;

	/* Adjust any no-longer-needed kthreads. */
	rcu_boost_kthread_setaffinity(rnp, -1);

	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
	raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
	rcu_adopt_orphan_cbs(rsp, flags);
	raw_spin_unlock_irqrestore(&rsp->orphan_lock, flags);

	WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL,
		  "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n",
		  cpu, rdp->qlen, rdp->nxtlist);
}

/*
 * Invoke any RCU callbacks that have made it to the end of their grace
 * period. Throttle as specified by rdp->blimit.
 */
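/*
 * Throttling note: ->blimit bounds how many callbacks one invocation
 * may run (the default is modest, on the order of ten) so that a long
 * callback list cannot monopolize the CPU. Under overload, ->blimit is
 * raised to LONG_MAX elsewhere and restored below once ->qlen drops
 * back under qlowmark.
 */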
  2326. static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  2327. {
  2328. unsigned long flags;
  2329. struct rcu_head *next, *list, **tail;
  2330. long bl, count, count_lazy;
  2331. int i;
  2332. /* If no callbacks are ready, just return. */
  2333. if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
  2334. trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
  2335. trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist),
  2336. need_resched(), is_idle_task(current),
  2337. rcu_is_callbacks_kthread());
  2338. return;
  2339. }
  2340. /*
  2341. * Extract the list of ready callbacks, disabling to prevent
  2342. * races with call_rcu() from interrupt handlers.
  2343. */
  2344. local_irq_save(flags);
  2345. WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
  2346. bl = rdp->blimit;
  2347. trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, bl);
  2348. list = rdp->nxtlist;
  2349. rdp->nxtlist = *rdp->nxttail[RCU_DONE_TAIL];
  2350. *rdp->nxttail[RCU_DONE_TAIL] = NULL;
  2351. tail = rdp->nxttail[RCU_DONE_TAIL];
  2352. for (i = RCU_NEXT_SIZE - 1; i >= 0; i--)
  2353. if (rdp->nxttail[i] == rdp->nxttail[RCU_DONE_TAIL])
  2354. rdp->nxttail[i] = &rdp->nxtlist;
  2355. local_irq_restore(flags);
  2356. /* Invoke callbacks. */
  2357. count = count_lazy = 0;
  2358. while (list) {
  2359. next = list->next;
  2360. prefetch(next);
  2361. debug_rcu_head_unqueue(list);
  2362. if (__rcu_reclaim(rsp->name, list))
  2363. count_lazy++;
  2364. list = next;
  2365. /* Stop only if limit reached and CPU has something to do. */
  2366. if (++count >= bl &&
  2367. (need_resched() ||
  2368. (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
  2369. break;
  2370. }
  2371. local_irq_save(flags);
  2372. trace_rcu_batch_end(rsp->name, count, !!list, need_resched(),
  2373. is_idle_task(current),
  2374. rcu_is_callbacks_kthread());
  2375. /* Update count, and requeue any remaining callbacks. */
  2376. if (list != NULL) {
  2377. *tail = rdp->nxtlist;
  2378. rdp->nxtlist = list;
  2379. for (i = 0; i < RCU_NEXT_SIZE; i++)
  2380. if (&rdp->nxtlist == rdp->nxttail[i])
  2381. rdp->nxttail[i] = tail;
  2382. else
  2383. break;
  2384. }
  2385. smp_mb(); /* List handling before counting for rcu_barrier(). */
  2386. rdp->qlen_lazy -= count_lazy;
  2387. WRITE_ONCE(rdp->qlen, rdp->qlen - count);
  2388. rdp->n_cbs_invoked += count;
  2389. /* Reinstate batch limit if we have worked down the excess. */
  2390. if (rdp->blimit == LONG_MAX && rdp->qlen <= qlowmark)
  2391. rdp->blimit = blimit;
  2392. /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
  2393. if (rdp->qlen == 0 && rdp->qlen_last_fqs_check != 0) {
  2394. rdp->qlen_last_fqs_check = 0;
  2395. rdp->n_force_qs_snap = rsp->n_force_qs;
  2396. } else if (rdp->qlen < rdp->qlen_last_fqs_check - qhimark)
  2397. rdp->qlen_last_fqs_check = rdp->qlen;
  2398. WARN_ON_ONCE((rdp->nxtlist == NULL) != (rdp->qlen == 0));
  2399. local_irq_restore(flags);
  2400. /* Re-invoke RCU core processing if there are callbacks remaining. */
  2401. if (cpu_has_callbacks_ready_to_invoke(rdp))
  2402. invoke_rcu_core();
  2403. }
/*
 * Check to see if this CPU is in a non-context-switch quiescent state
 * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
 * Also schedule RCU core processing.
 *
 * This function must be called from hardirq context. It is normally
 * invoked from the scheduling-clock interrupt. If rcu_pending returns
 * false, there is no point in invoking rcu_check_callbacks().
 */
void rcu_check_callbacks(int user)
{
	trace_rcu_utilization(TPS("Start scheduler-tick"));
	increment_cpu_stall_ticks();
	if (user || rcu_is_cpu_rrupt_from_idle()) {

		/*
		 * Get here if this CPU took its interrupt from user
		 * mode or from the idle loop, and if this is not a
		 * nested interrupt. In this case, the CPU is in
		 * a quiescent state, so note it.
		 *
		 * No memory barrier is required here because both
		 * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
		 * variables that other CPUs neither access nor modify,
		 * at least not while the corresponding CPU is online.
		 */

		rcu_sched_qs();
		rcu_bh_qs();

	} else if (!in_softirq()) {

		/*
		 * Get here if this CPU did not take its interrupt from
		 * softirq, in other words, if it is not interrupting
		 * an rcu_bh read-side critical section. This is an _bh
		 * critical section, so note it.
		 */

		rcu_bh_qs();
	}
	rcu_preempt_check_callbacks();
	if (rcu_pending())
		invoke_rcu_core();
	if (user)
		rcu_note_voluntary_context_switch(current);
	trace_rcu_utilization(TPS("End scheduler-tick"));
}

/*
 * Scan the leaf rcu_node structures, processing dyntick state for any that
 * have not yet encountered a quiescent state, using the function specified.
 * Also initiate boosting for any threads blocked on the root rcu_node.
 *
 * The caller must have suppressed start of new grace periods.
 */
static void force_qs_rnp(struct rcu_state *rsp,
			 int (*f)(struct rcu_data *rsp, bool *isidle,
				  unsigned long *maxj),
			 bool *isidle, unsigned long *maxj)
{
	unsigned long bit;
	int cpu;
	unsigned long flags;
	unsigned long mask;
	struct rcu_node *rnp;

	rcu_for_each_leaf_node(rsp, rnp) {
		cond_resched_rcu_qs();
		mask = 0;
		raw_spin_lock_irqsave(&rnp->lock, flags);
		smp_mb__after_unlock_lock();
		if (rnp->qsmask == 0) {
			if (rcu_state_p == &rcu_sched_state ||
			    rsp != rcu_state_p ||
			    rcu_preempt_blocked_readers_cgp(rnp)) {
				/*
				 * No point in scanning bits because they
				 * are all zero. But we might need to
				 * priority-boost blocked readers.
				 */
				rcu_initiate_boost(rnp, flags);
				/* rcu_initiate_boost() releases rnp->lock. */
				continue;
			}
			if (rnp->parent &&
			    (rnp->parent->qsmask & rnp->grpmask)) {
				/*
				 * Race between grace-period
				 * initialization and task exiting RCU
				 * read-side critical section: Report.
				 */
				rcu_report_unblock_qs_rnp(rsp, rnp, flags);
				/* rcu_report_unblock_qs_rnp() releases ->lock. */
				continue;
			}
		}
		cpu = rnp->grplo;
		bit = 1;
		for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
			if ((rnp->qsmask & bit) != 0) {
				if ((rnp->qsmaskinit & bit) == 0)
					*isidle = false; /* Pending hotplug. */
				if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
					mask |= bit;
			}
		}
		if (mask != 0) {
			/* Idle/offline CPUs, report (releases rnp->lock). */
			rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
		} else {
			/* Nothing to do here, so just drop the lock. */
			raw_spin_unlock_irqrestore(&rnp->lock, flags);
		}
	}
}

/*
 * Force quiescent states on reluctant CPUs, and also detect which
 * CPUs are in dyntick-idle mode.
 */
static void force_quiescent_state(struct rcu_state *rsp)
{
	unsigned long flags;
	bool ret;
	struct rcu_node *rnp;
	struct rcu_node *rnp_old = NULL;

	/* Funnel through hierarchy to reduce memory contention. */
	rnp = __this_cpu_read(rsp->rda->mynode);
	for (; rnp != NULL; rnp = rnp->parent) {
		ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
		      !raw_spin_trylock(&rnp->fqslock);
		if (rnp_old != NULL)
			raw_spin_unlock(&rnp_old->fqslock);
		if (ret) {
			rsp->n_force_qs_lh++;
			return;
		}
		rnp_old = rnp;
	}
	/* rnp_old == rcu_get_root(rsp), rnp == NULL. */

	/* Reached the root of the rcu_node tree, acquire lock. */
	raw_spin_lock_irqsave(&rnp_old->lock, flags);
	smp_mb__after_unlock_lock();
	raw_spin_unlock(&rnp_old->fqslock);
	if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
		rsp->n_force_qs_lh++;
		raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
		return; /* Someone beat us to it. */
	}
	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
	raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
	rcu_gp_kthread_wake(rsp);
}

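/*
 * Worked example of the funnel above (illustrative only, with assumed
 * numbers): on a system with 64 CPUs, a 16-way leaf fanout, and thus
 * four leaf rcu_node structures, 64 simultaneous callers of
 * force_quiescent_state() first contend in groups of at most 16 on
 * their own leaf's ->fqslock. At most four winners then contend on the
 * root's ->fqslock, and exactly one of them sets RCU_GP_FLAG_FQS.
 * Every other task sees either a failed trylock or the already-set
 * flag, increments ->n_force_qs_lh, and returns, so the root lock is
 * touched by only a handful of CPUs no matter how many request
 * quiescent-state forcing.
 */
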
/*
 * This does the RCU core processing work for the specified rcu_state
 * and rcu_data structures. This may be called only from the CPU to
 * whom the rdp belongs.
 */
static void
__rcu_process_callbacks(struct rcu_state *rsp)
{
	unsigned long flags;
	bool needwake;
	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);

	WARN_ON_ONCE(rdp->beenonline == 0);

	/* Update RCU state based on any recent quiescent states. */
	rcu_check_quiescent_state(rsp, rdp);

	/* Does this CPU require a not-yet-started grace period? */
	local_irq_save(flags);
	if (cpu_needs_another_gp(rsp, rdp)) {
		raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
		needwake = rcu_start_gp(rsp);
		raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
		if (needwake)
			rcu_gp_kthread_wake(rsp);
	} else {
		local_irq_restore(flags);
	}

	/* If there are callbacks ready, invoke them. */
	if (cpu_has_callbacks_ready_to_invoke(rdp))
		invoke_rcu_callbacks(rsp, rdp);

	/* Do any needed deferred wakeups of rcuo kthreads. */
	do_nocb_deferred_wakeup(rdp);
}

/*
 * Do RCU core processing for the current CPU.
 */
static void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_state *rsp;

	if (cpu_is_offline(smp_processor_id()))
		return;
	trace_rcu_utilization(TPS("Start RCU core"));
	for_each_rcu_flavor(rsp)
		__rcu_process_callbacks(rsp);
	trace_rcu_utilization(TPS("End RCU core"));
}

/*
 * Schedule RCU callback invocation. If the specified type of RCU
 * does not support RCU priority boosting, just do a direct call,
 * otherwise wake up the per-CPU kernel kthread. Note that because we
 * are running on the current CPU with softirqs disabled, the
 * rcu_cpu_kthread_task cannot disappear out from under us.
 */
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{
	if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
		return;
	if (likely(!rsp->boost)) {
		rcu_do_batch(rsp, rdp);
		return;
	}
	invoke_rcu_callbacks_kthread();
}

static void invoke_rcu_core(void)
{
	if (cpu_online(smp_processor_id()))
		raise_softirq(RCU_SOFTIRQ);
}

/*
 * Handle any core-RCU processing required by a call_rcu() invocation.
 */
static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
			    struct rcu_head *head, unsigned long flags)
{
	bool needwake;

	/*
	 * If called from an extended quiescent state, invoke the RCU
	 * core in order to force a re-evaluation of RCU's idleness.
	 */
	if (!rcu_is_watching())
		invoke_rcu_core();

	/* If interrupts were disabled or CPU offline, don't invoke RCU core. */
	if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
		return;

	/*
	 * Force the grace period if too many callbacks or too long waiting.
	 * Enforce hysteresis, and don't invoke force_quiescent_state()
	 * if some other CPU has recently done so. Also, don't bother
	 * invoking force_quiescent_state() if the newly enqueued callback
	 * is the only one waiting for a grace period to complete.
	 */
	if (unlikely(rdp->qlen > rdp->qlen_last_fqs_check + qhimark)) {

		/* Are we ignoring a completed grace period? */
		note_gp_changes(rsp, rdp);

		/* Start a new grace period if one not already started. */
		if (!rcu_gp_in_progress(rsp)) {
			struct rcu_node *rnp_root = rcu_get_root(rsp);

			raw_spin_lock(&rnp_root->lock);
			smp_mb__after_unlock_lock();
			needwake = rcu_start_gp(rsp);
			raw_spin_unlock(&rnp_root->lock);
			if (needwake)
				rcu_gp_kthread_wake(rsp);
		} else {
			/* Give the grace period a kick. */
			rdp->blimit = LONG_MAX;
			if (rsp->n_force_qs == rdp->n_force_qs_snap &&
			    *rdp->nxttail[RCU_DONE_TAIL] != head)
				force_quiescent_state(rsp);
			rdp->n_force_qs_snap = rsp->n_force_qs;
			rdp->qlen_last_fqs_check = rdp->qlen;
		}
	}
}

/*
 * RCU callback function to leak a callback.
 */
static void rcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Helper function for call_rcu() and friends. The cpu argument will
 * normally be -1, indicating "currently running CPU". It may specify
 * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier()
 * is expected to specify a CPU.
 */
static void
__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
	   struct rcu_state *rsp, int cpu, bool lazy)
{
	unsigned long flags;
	struct rcu_data *rdp;

	WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
	if (debug_rcu_head_queue(head)) {
		/* Probable double call_rcu(), so leak the callback. */
		WRITE_ONCE(head->func, rcu_leak_callback);
		WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
		return;
	}
	head->func = func;
	head->next = NULL;

	/*
	 * Opportunistically note grace-period endings and beginnings.
	 * Note that we might see a beginning right after we see an
	 * end, but never vice versa, since this CPU has to pass through
	 * a quiescent state betweentimes.
	 */
	local_irq_save(flags);
	rdp = this_cpu_ptr(rsp->rda);

	/* Add the callback to our list. */
	if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL) || cpu != -1) {
		int offline;

		if (cpu != -1)
			rdp = per_cpu_ptr(rsp->rda, cpu);
		if (likely(rdp->mynode)) {
			/* Post-boot, so this should be for a no-CBs CPU. */
			offline = !__call_rcu_nocb(rdp, head, lazy, flags);
			WARN_ON_ONCE(offline);
			/* Offline CPU, __call_rcu() illegal, leak callback. */
			local_irq_restore(flags);
			return;
		}
		/*
		 * Very early boot, before rcu_init(). Initialize if needed
		 * and then drop through to queue the callback.
		 */
		BUG_ON(cpu != -1);
		WARN_ON_ONCE(!rcu_is_watching());
		if (!likely(rdp->nxtlist))
			init_default_callback_list(rdp);
	}
	WRITE_ONCE(rdp->qlen, rdp->qlen + 1);
	if (lazy)
		rdp->qlen_lazy++;
	else
		rcu_idle_count_callbacks_posted();
	smp_mb(); /* Count before adding callback for rcu_barrier(). */
	*rdp->nxttail[RCU_NEXT_TAIL] = head;
	rdp->nxttail[RCU_NEXT_TAIL] = &head->next;

	if (__is_kfree_rcu_offset((unsigned long)func))
		trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
					 rdp->qlen_lazy, rdp->qlen);
	else
		trace_rcu_callback(rsp->name, head, rdp->qlen_lazy, rdp->qlen);

	/* Go handle any RCU core processing required. */
	__call_rcu_core(rsp, rdp, head, flags);
	local_irq_restore(flags);
}

/*
 * Queue an RCU-sched callback for invocation after a grace period.
 */
void call_rcu_sched(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_sched_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_sched);

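/*
 * Example (illustrative only; struct foo, foo_head, and foo_reclaim are
 * hypothetical): a typical caller embeds an rcu_head in the protected
 * structure and frees the structure from the callback once a grace
 * period has elapsed:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head foo_head;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *rcu)
 *	{
 *		struct foo *fp = container_of(rcu, struct foo, foo_head);
 *
 *		kfree(fp);
 *	}
 *
 * After removing fp from all reader-visible paths:
 *
 *	call_rcu_sched(&fp->foo_head, foo_reclaim);
 */
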
/*
 * Queue an RCU callback for invocation after a quicker grace period.
 */
void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);

/*
 * Queue an RCU callback for lazy invocation after a grace period.
 * This will likely be later named something like "call_rcu_lazy()",
 * but this change will require some way of tagging the lazy RCU
 * callbacks in the list of pending callbacks. Until then, this
 * function may only be called from __kfree_rcu().
 */
void kfree_call_rcu(struct rcu_head *head,
		    void (*func)(struct rcu_head *rcu))
{
	__call_rcu(head, func, rcu_state_p, -1, 1);
}
EXPORT_SYMBOL_GPL(kfree_call_rcu);

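/*
 * Example (illustrative only; struct foo and fp are hypothetical):
 * callers normally reach kfree_call_rcu() through the kfree_rcu()
 * macro, which encodes the offset of the rcu_head within the enclosing
 * structure so that no separate callback function is needed:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 * After removing fp from all reader-visible paths:
 *
 *	kfree_rcu(fp, rcu);
 *
 * which queues a lazy callback that simply does kfree(fp) once a grace
 * period has elapsed.
 */
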
/*
 * Because a context switch is a grace period for RCU-sched and RCU-bh,
 * any blocking grace-period wait automatically implies a grace period
 * if there is only one CPU online at any point in time during execution
 * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to
 * occasionally incorrectly indicate that there are multiple CPUs online
 * when there was in fact only one the whole time, as this just adds
 * some overhead: RCU still operates correctly.
 */
static inline int rcu_blocking_is_gp(void)
{
	int ret;

	might_sleep(); /* Check for RCU read-side critical section. */
	preempt_disable();
	ret = num_online_cpus() <= 1;
	preempt_enable();
	return ret;
}

/**
 * synchronize_sched - wait until an rcu-sched grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu-sched
 * grace period has elapsed, in other words after all currently executing
 * rcu-sched read-side critical sections have completed. These read-side
 * critical sections are delimited by rcu_read_lock_sched() and
 * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
 * local_irq_disable(), and so on may be used in place of
 * rcu_read_lock_sched().
 *
 * This means that all preempt_disable code sequences, including NMI and
 * non-threaded hardware-interrupt handlers, in progress on entry will
 * have completed before this primitive returns. However, this does not
 * guarantee that softirq handlers will have completed, since in some
 * kernels, these handlers can run in process context, and can block.
 *
 * Note that this guarantee implies further memory-ordering guarantees.
 * On systems with more than one CPU, when synchronize_sched() returns,
 * each CPU is guaranteed to have executed a full memory barrier since the
 * end of its last RCU-sched read-side critical section whose beginning
 * preceded the call to synchronize_sched(). In addition, each CPU having
 * an RCU read-side critical section that extends beyond the return from
 * synchronize_sched() is guaranteed to have executed a full memory barrier
 * after the beginning of synchronize_sched() and before the beginning of
 * that RCU read-side critical section. Note that these guarantees include
 * CPUs that are offline, idle, or executing in user mode, as well as CPUs
 * that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_sched(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * This primitive provides the guarantees made by the (now removed)
 * synchronize_kernel() API. In contrast, synchronize_rcu() only
 * guarantees that rcu_read_lock() sections will have completed.
 * In "classic RCU", these two guarantees happen to be one and
 * the same, but can differ in realtime RCU implementations.
 */
void synchronize_sched(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_sched() in RCU-sched read-side critical section");
	if (rcu_blocking_is_gp())
		return;
	if (rcu_gp_is_expedited())
		synchronize_sched_expedited();
	else
		wait_rcu_gp(call_rcu_sched);
}
EXPORT_SYMBOL_GPL(synchronize_sched);

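/*
 * Example (illustrative only; gp, gp_lock, and struct foo are
 * hypothetical): the classic update-side pattern unpublishes an element,
 * waits for an RCU-sched grace period, and only then frees it, so that
 * no preempt-disabled reader can still be referencing it:
 *
 *	struct foo *old;
 *
 *	spin_lock(&gp_lock);
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, NULL);
 *	spin_unlock(&gp_lock);
 *	synchronize_sched();	(all pre-existing readers have finished)
 *	kfree(old);
 */
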
/**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu_bh grace
 * period has elapsed, in other words after all currently executing rcu_bh
 * read-side critical sections have completed. RCU read-side critical
 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
 * and may be nested.
 *
 * See the description of synchronize_sched() for more detailed information
 * on memory ordering guarantees.
 */
void synchronize_rcu_bh(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
	if (rcu_blocking_is_gp())
		return;
	if (rcu_gp_is_expedited())
		synchronize_rcu_bh_expedited();
	else
		wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);

/**
 * get_state_synchronize_rcu - Snapshot current RCU state
 *
 * Returns a cookie that is used by a later call to cond_synchronize_rcu()
 * to determine whether or not a full grace period has elapsed in the
 * meantime.
 */
unsigned long get_state_synchronize_rcu(void)
{
	/*
	 * Any prior manipulation of RCU-protected data must happen
	 * before the load from ->gpnum.
	 */
	smp_mb(); /* ^^^ */

	/*
	 * Make sure this load happens before the purportedly
	 * time-consuming work between get_state_synchronize_rcu()
	 * and cond_synchronize_rcu().
	 */
	return smp_load_acquire(&rcu_state_p->gpnum);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/**
 * cond_synchronize_rcu - Conditionally wait for an RCU grace period
 *
 * @oldstate: return value from earlier call to get_state_synchronize_rcu()
 *
 * If a full RCU grace period has elapsed since the earlier call to
 * get_state_synchronize_rcu(), just return. Otherwise, invoke
 * synchronize_rcu() to wait for a full grace period.
 *
 * Yes, this function does not take counter wrap into account. But
 * counter wrap is harmless. If the counter wraps, we have waited for
 * more than 2 billion grace periods (and way more on a 64-bit system!),
 * so waiting for one additional grace period should be just fine.
 */
void cond_synchronize_rcu(unsigned long oldstate)
{
	unsigned long newstate;

	/*
	 * Ensure that this load happens before any RCU-destructive
	 * actions the caller might carry out after we return.
	 */
	newstate = smp_load_acquire(&rcu_state_p->completed);
	if (ULONG_CMP_GE(oldstate, newstate))
		synchronize_rcu();
}
EXPORT_SYMBOL_GPL(cond_synchronize_rcu);

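/*
 * Example (illustrative only; do_something_lengthy() is hypothetical):
 * the snapshot/check pair lets a caller overlap a grace period with
 * other work, blocking only if no full grace period elapsed in the
 * meantime:
 *
 *	unsigned long gp_snap;
 *
 *	gp_snap = get_state_synchronize_rcu();
 *	do_something_lengthy();
 *	cond_synchronize_rcu(gp_snap);	(returns at once if a GP elapsed)
 */
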
static int synchronize_sched_expedited_cpu_stop(void *data)
{
	/*
	 * There must be a full memory barrier on each affected CPU
	 * between the time that try_stop_cpus() is called and the
	 * time that it returns.
	 *
	 * In the current initial implementation of cpu_stop, the
	 * above condition is already met when the control reaches
	 * this point and the following smp_mb() is not strictly
	 * necessary. Do smp_mb() anyway for documentation and
	 * robustness against future implementation changes.
	 */
	smp_mb(); /* See above comment block. */
	return 0;
}

/**
 * synchronize_sched_expedited - Brute-force RCU-sched grace period
 *
 * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly. This consumes
 * significant time on all CPUs and is unfriendly to real-time workloads,
 * so is thus not recommended for any sort of common-case code. In fact,
 * if you are using synchronize_sched_expedited() in a loop, please
 * restructure your code to batch your updates, and then use a single
 * synchronize_sched() instead.
 *
 * This implementation can be thought of as an application of ticket
 * locking to RCU, with sync_sched_expedited_started and
 * sync_sched_expedited_done taking on the roles of the halves
 * of the ticket-lock word. Each task atomically increments
 * sync_sched_expedited_started upon entry, snapshotting the old value,
 * then attempts to stop all the CPUs. If this succeeds, then each
 * CPU will have executed a context switch, resulting in an RCU-sched
 * grace period. We are then done, so we use atomic_cmpxchg() to
 * update sync_sched_expedited_done to match our snapshot -- but
 * only if someone else has not already advanced past our snapshot.
 *
 * On the other hand, if try_stop_cpus() fails, we check the value
 * of sync_sched_expedited_done. If it has advanced past our
 * initial snapshot, then someone else must have forced a grace period
 * some time after we took our snapshot. In this case, our work is
 * done for us, and we can simply return. Otherwise, we try again,
 * but keep our initial snapshot for purposes of checking for someone
 * doing our work for us.
 *
 * If we fail too many times in a row, we fall back to synchronize_sched().
 */
void synchronize_sched_expedited(void)
{
	cpumask_var_t cm;
	bool cma = false;
	int cpu;
	long firstsnap, s, snap;
	int trycount = 0;
	struct rcu_state *rsp = &rcu_sched_state;

	/*
	 * If we are in danger of counter wrap, just do synchronize_sched().
	 * By allowing sync_sched_expedited_started to advance no more than
	 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
	 * that more than 3.5 billion CPUs would be required to force a
	 * counter wrap on a 32-bit system. Quite a few more CPUs would of
	 * course be required on a 64-bit system.
	 */
	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
			 (ulong)atomic_long_read(&rsp->expedited_done) +
			 ULONG_MAX / 8)) {
		synchronize_sched();
		atomic_long_inc(&rsp->expedited_wrap);
		return;
	}

	/*
	 * Take a ticket. Note that atomic_inc_return() implies a
	 * full memory barrier.
	 */
	snap = atomic_long_inc_return(&rsp->expedited_start);
	firstsnap = snap;
	if (!try_get_online_cpus()) {
		/* CPU hotplug operation in flight, fall back to normal GP. */
		wait_rcu_gp(call_rcu_sched);
		atomic_long_inc(&rsp->expedited_normal);
		return;
	}
	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));

	/* Offline CPUs, idle CPUs, and any CPU we run on are quiescent. */
	cma = zalloc_cpumask_var(&cm, GFP_KERNEL);
	if (cma) {
		cpumask_copy(cm, cpu_online_mask);
		cpumask_clear_cpu(raw_smp_processor_id(), cm);
		for_each_cpu(cpu, cm) {
			struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

			if (!(atomic_add_return(0, &rdtp->dynticks) & 0x1))
				cpumask_clear_cpu(cpu, cm);
		}
		if (cpumask_weight(cm) == 0)
			goto all_cpus_idle;
	}

	/*
	 * Each pass through the following loop attempts to force a
	 * context switch on each CPU.
	 */
	while (try_stop_cpus(cma ? cm : cpu_online_mask,
			     synchronize_sched_expedited_cpu_stop,
			     NULL) == -EAGAIN) {
		put_online_cpus();
		atomic_long_inc(&rsp->expedited_tryfail);

		/* Check to see if someone else did our work for us. */
		s = atomic_long_read(&rsp->expedited_done);
		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
			/* ensure test happens before caller kfree */
			smp_mb__before_atomic(); /* ^^^ */
			atomic_long_inc(&rsp->expedited_workdone1);
			free_cpumask_var(cm);
			return;
		}

		/* No joy, try again later. Or just synchronize_sched(). */
		if (trycount++ < 10) {
			udelay(trycount * num_online_cpus());
		} else {
			wait_rcu_gp(call_rcu_sched);
			atomic_long_inc(&rsp->expedited_normal);
			free_cpumask_var(cm);
			return;
		}

		/* Recheck to see if someone else did our work for us. */
		s = atomic_long_read(&rsp->expedited_done);
		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
			/* ensure test happens before caller kfree */
			smp_mb__before_atomic(); /* ^^^ */
			atomic_long_inc(&rsp->expedited_workdone2);
			free_cpumask_var(cm);
			return;
		}

		/*
		 * Refetching sync_sched_expedited_started allows later
		 * callers to piggyback on our grace period. We retry
		 * after they started, so our grace period works for them,
		 * and they started after our first try, so their grace
		 * period works for us.
		 */
		if (!try_get_online_cpus()) {
			/* CPU hotplug operation in flight, use normal GP. */
			wait_rcu_gp(call_rcu_sched);
			atomic_long_inc(&rsp->expedited_normal);
			free_cpumask_var(cm);
			return;
		}
		snap = atomic_long_read(&rsp->expedited_start);
		smp_mb(); /* ensure read is before try_stop_cpus(). */
	}
	atomic_long_inc(&rsp->expedited_stoppedcpus);

all_cpus_idle:
	free_cpumask_var(cm);

	/*
	 * Everyone up to our most recent fetch is covered by our grace
	 * period. Update the counter, but only if our work is still
	 * relevant -- which it won't be if someone who started later
	 * than we did already did their update.
	 */
	do {
		atomic_long_inc(&rsp->expedited_done_tries);
		s = atomic_long_read(&rsp->expedited_done);
		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
			/* ensure test happens before caller kfree */
			smp_mb__before_atomic(); /* ^^^ */
			atomic_long_inc(&rsp->expedited_done_lost);
			break;
		}
	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
	atomic_long_inc(&rsp->expedited_done_exit);

	put_online_cpus();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

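/*
 * Worked example of the ticketing above (illustrative only, with
 * assumed counter values): suppose we take ticket 5 (firstsnap = 5)
 * and our try_stop_cpus() keeps failing with -EAGAIN. If some other
 * task meanwhile completes its own expedited grace period and advances
 * ->expedited_done to 7, our next check finds ULONG_CMP_GE(7, 5) true:
 * a full grace period has elapsed since we took our ticket, so our
 * work has been done for us and we return immediately.
 */
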
/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, for the specified type of RCU, returning 1 if so.
 * The checks are in order of increasing expense: checks that can be
 * carried out against CPU-local state are performed first. However,
 * we must check for CPU stalls first, else we might not get a chance.
 */
static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
{
	struct rcu_node *rnp = rdp->mynode;

	rdp->n_rcu_pending++;

	/* Check for CPU stalls, if enabled. */
	check_cpu_stall(rsp, rdp);

	/* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
	if (rcu_nohz_full_cpu(rsp))
		return 0;

	/* Is the RCU core waiting for a quiescent state from this CPU? */
	if (rcu_scheduler_fully_active &&
	    rdp->qs_pending && !rdp->passed_quiesce &&
	    rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
		rdp->n_rp_qs_pending++;
	} else if (rdp->qs_pending &&
		   (rdp->passed_quiesce ||
		    rdp->rcu_qs_ctr_snap != __this_cpu_read(rcu_qs_ctr))) {
		rdp->n_rp_report_qs++;
		return 1;
	}

	/* Does this CPU have callbacks ready to invoke? */
	if (cpu_has_callbacks_ready_to_invoke(rdp)) {
		rdp->n_rp_cb_ready++;
		return 1;
	}

	/* Has RCU gone idle with this CPU needing another grace period? */
	if (cpu_needs_another_gp(rsp, rdp)) {
		rdp->n_rp_cpu_needs_gp++;
		return 1;
	}

	/* Has another RCU grace period completed? */
	if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
		rdp->n_rp_gp_completed++;
		return 1;
	}

	/* Has a new RCU grace period started? */
	if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
	    unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */
		rdp->n_rp_gp_started++;
		return 1;
	}

	/* Does this CPU need a deferred NOCB wakeup? */
	if (rcu_nocb_need_deferred_wakeup(rdp)) {
		rdp->n_rp_nocb_defer_wakeup++;
		return 1;
	}

	/* nothing to do */
	rdp->n_rp_need_nothing++;
	return 0;
}

/*
 * Check to see if there is any immediate RCU-related work to be done
 * by the current CPU, returning 1 if so. This function is part of the
 * RCU implementation; it is -not- an exported member of the RCU API.
 */
static int rcu_pending(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
			return 1;
	return 0;
}

/*
 * Return true if the current CPU has any callback. If all_lazy is
 * non-NULL, store an indication of whether all callbacks are lazy.
 * (If there are no callbacks, all of them are deemed to be lazy.)
 */
static int __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
{
	bool al = true;
	bool hc = false;
	struct rcu_data *rdp;
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		if (!rdp->nxtlist)
			continue;
		hc = true;
		if (rdp->qlen != rdp->qlen_lazy || !all_lazy) {
			al = false;
			break;
		}
	}
	if (all_lazy)
		*all_lazy = al;
	return hc;
}

/*
 * Helper function for _rcu_barrier() tracing. If tracing is disabled,
 * the compiler is expected to optimize this away.
 */
static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
			       int cpu, unsigned long done)
{
	trace_rcu_barrier(rsp->name, s, cpu,
			  atomic_read(&rsp->barrier_cpu_count), done);
}

/*
 * RCU callback function for _rcu_barrier(). If we are last, wake
 * up the task executing _rcu_barrier().
 */
static void rcu_barrier_callback(struct rcu_head *rhp)
{
	struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
	struct rcu_state *rsp = rdp->rsp;

	if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
		_rcu_barrier_trace(rsp, "LastCB", -1, rsp->n_barrier_done);
		complete(&rsp->barrier_completion);
	} else {
		_rcu_barrier_trace(rsp, "CB", -1, rsp->n_barrier_done);
	}
}

/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
	struct rcu_state *rsp = type;
	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);

	_rcu_barrier_trace(rsp, "IRQ", -1, rsp->n_barrier_done);
	atomic_inc(&rsp->barrier_cpu_count);
	rsp->call(&rdp->barrier_head, rcu_barrier_callback);
}

/*
 * Orchestrate the specified type of RCU barrier, waiting for all
 * RCU callbacks of the specified type to complete.
 */
static void _rcu_barrier(struct rcu_state *rsp)
{
	int cpu;
	struct rcu_data *rdp;
	unsigned long snap = READ_ONCE(rsp->n_barrier_done);
	unsigned long snap_done;

	_rcu_barrier_trace(rsp, "Begin", -1, snap);

	/* Take mutex to serialize concurrent rcu_barrier() requests. */
	mutex_lock(&rsp->barrier_mutex);

	/*
	 * Ensure that all prior references, including to ->n_barrier_done,
	 * are ordered before the _rcu_barrier() machinery.
	 */
	smp_mb(); /* See above block comment. */

	/*
	 * Recheck ->n_barrier_done to see if others did our work for us.
	 * This means checking ->n_barrier_done for an even-to-odd-to-even
	 * transition. The "if" expression below therefore rounds the old
	 * value up to the next even number and adds two before comparing.
	 */
	snap_done = rsp->n_barrier_done;
	_rcu_barrier_trace(rsp, "Check", -1, snap_done);

	/*
	 * If the value in snap is odd, we needed to wait for the current
	 * rcu_barrier() to complete, then wait for the next one, in other
	 * words, we need the value of snap_done to be three larger than
	 * the value of snap. On the other hand, if the value in snap is
	 * even, we only had to wait for the next rcu_barrier() to complete,
	 * in other words, we need the value of snap_done to be only two
	 * greater than the value of snap. The "(snap + 3) & ~0x1" computes
	 * this for us (thank you, Linus!).
	 */
	if (ULONG_CMP_GE(snap_done, (snap + 3) & ~0x1)) {
		_rcu_barrier_trace(rsp, "EarlyExit", -1, snap_done);
		smp_mb(); /* caller's subsequent code after above check. */
		mutex_unlock(&rsp->barrier_mutex);
		return;
	}

	/*
	 * Increment ->n_barrier_done to avoid duplicate work. Use
	 * WRITE_ONCE() to prevent the compiler from speculating
	 * the increment to precede the early-exit check.
	 */
	WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */

	/*
	 * Initialize the count to one rather than to zero in order to
	 * avoid a too-soon return to zero in case of a short grace period
	 * (or preemption of this task). Exclude CPU-hotplug operations
	 * to ensure that no offline CPU has callbacks queued.
	 */
	init_completion(&rsp->barrier_completion);
	atomic_set(&rsp->barrier_cpu_count, 1);
	get_online_cpus();

	/*
	 * Force each CPU with callbacks to register a new callback.
	 * When that callback is invoked, we will know that all of the
	 * corresponding CPU's preceding callbacks have been invoked.
	 */
	for_each_possible_cpu(cpu) {
		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
			continue;
		rdp = per_cpu_ptr(rsp->rda, cpu);
		if (rcu_is_nocb_cpu(cpu)) {
			if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
				_rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
						   rsp->n_barrier_done);
			} else {
				_rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
						   rsp->n_barrier_done);
				smp_mb__before_atomic();
				atomic_inc(&rsp->barrier_cpu_count);
				__call_rcu(&rdp->barrier_head,
					   rcu_barrier_callback, rsp, cpu, 0);
			}
		} else if (READ_ONCE(rdp->qlen)) {
			_rcu_barrier_trace(rsp, "OnlineQ", cpu,
					   rsp->n_barrier_done);
			smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
		} else {
			_rcu_barrier_trace(rsp, "OnlineNQ", cpu,
					   rsp->n_barrier_done);
		}
	}
	put_online_cpus();

	/*
	 * Now that we have an rcu_barrier_callback() callback on each
	 * CPU, and thus each counted, remove the initial count.
	 */
	if (atomic_dec_and_test(&rsp->barrier_cpu_count))
		complete(&rsp->barrier_completion);

	/* Increment ->n_barrier_done to prevent duplicate work. */
	smp_mb(); /* Keep increment after above mechanism. */
	WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
	smp_mb(); /* Keep increment before caller's subsequent code. */

	/* Wait for all rcu_barrier_callback() callbacks to be invoked. */
	wait_for_completion(&rsp->barrier_completion);

	/* Other rcu_barrier() invocations can now safely proceed. */
	mutex_unlock(&rsp->barrier_mutex);
}

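/*
 * Worked example of the ->n_barrier_done check above (illustrative
 * only): if snap is even, say 4, then (4 + 3) & ~0x1 = 6, so we may
 * return early once snap_done reaches 6, that is, one full barrier
 * (even-to-odd-to-even) after our snapshot. If snap is odd, say 5,
 * then (5 + 3) & ~0x1 = 8: the in-progress barrier must finish (6) and
 * a complete new one must run (7, then 8) before our caller's
 * pre-existing callbacks are guaranteed to have been invoked.
 */
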
/**
 * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
 */
void rcu_barrier_bh(void)
{
	_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);

/**
 * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
 */
void rcu_barrier_sched(void)
{
	_rcu_barrier(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);

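/*
 * Example (illustrative only; foo_exit() and foo_stop_posting_callbacks()
 * are hypothetical): a module whose callback functions live in module
 * text must wait for all outstanding callbacks before unloading, or
 * those callbacks would be invoked from freed memory:
 *
 *	static void __exit foo_exit(void)
 *	{
 *		foo_stop_posting_callbacks();
 *		rcu_barrier_sched();
 *	}
 *
 * Note that rcu_barrier_sched() waits only for callbacks posted before
 * it was called, hence the need to first stop posting new ones.
 */
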
/*
 * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
 * first CPU in a given leaf rcu_node structure coming online. The caller
 * must hold the corresponding leaf rcu_node ->lock with interrupts
 * disabled.
 */
static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
{
	long mask;
	struct rcu_node *rnp = rnp_leaf;

	for (;;) {
		mask = rnp->grpmask;
		rnp = rnp->parent;
		if (rnp == NULL)
			return;
		raw_spin_lock(&rnp->lock); /* Interrupts already disabled. */
		rnp->qsmaskinit |= mask;
		raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
	}
}

/*
 * Do boot-time initialization of a CPU's per-CPU RCU data.
 */
static void __init
rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
	rdp->cpu = cpu;
	rdp->rsp = rsp;
	rcu_boot_init_nocb_percpu_data(rdp);
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

/*
 * Initialize a CPU's per-CPU RCU data. Note that only one online or
 * offline event can be happening at a given time. Note also that we
 * can accept some slop in the rsp->completed access due to the fact
 * that this CPU cannot possibly have any RCU callbacks in flight yet.
 */
static void
rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
{
	unsigned long flags;
	unsigned long mask;
	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
	struct rcu_node *rnp = rcu_get_root(rsp);

	/* Set up local state, ensuring consistent view of global state. */
	raw_spin_lock_irqsave(&rnp->lock, flags);
	rdp->beenonline = 1; /* We have now been online. */
	rdp->qlen_last_fqs_check = 0;
	rdp->n_force_qs_snap = rsp->n_force_qs;
	rdp->blimit = blimit;
	if (!rdp->nxtlist)
		init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
	rcu_sysidle_init_percpu_data(rdp->dynticks);
	atomic_set(&rdp->dynticks->dynticks,
		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
	raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */

	/*
	 * Add CPU to leaf rcu_node pending-online bitmask. Any needed
	 * propagation up the rcu_node tree will happen at the beginning
	 * of the next grace period.
	 */
	rnp = rdp->mynode;
	mask = rdp->grpmask;
	raw_spin_lock(&rnp->lock); /* irqs already disabled. */
	smp_mb__after_unlock_lock();
	rnp->qsmaskinitnext |= mask;
	rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
	rdp->completed = rnp->completed;
	rdp->passed_quiesce = false;
	rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
	rdp->qs_pending = false;
	trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
	raw_spin_unlock_irqrestore(&rnp->lock, flags);
}

static void rcu_prepare_cpu(int cpu)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp)
		rcu_init_percpu_data(cpu, rsp);
}

/*
 * Handle CPU online/offline notification events.
 */
int rcu_cpu_notify(struct notifier_block *self,
		   unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
	struct rcu_node *rnp = rdp->mynode;
	struct rcu_state *rsp;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		rcu_prepare_cpu(cpu);
		rcu_prepare_kthreads(cpu);
		rcu_spawn_all_nocb_kthreads(cpu);
		break;
	case CPU_ONLINE:
	case CPU_DOWN_FAILED:
		rcu_boost_kthread_setaffinity(rnp, -1);
		break;
	case CPU_DOWN_PREPARE:
		rcu_boost_kthread_setaffinity(rnp, cpu);
		break;
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		for_each_rcu_flavor(rsp)
			rcu_cleanup_dying_cpu(rsp);
		break;
	case CPU_DYING_IDLE:
		for_each_rcu_flavor(rsp) {
			rcu_cleanup_dying_idle_cpu(cpu, rsp);
		}
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		for_each_rcu_flavor(rsp) {
			rcu_cleanup_dead_cpu(cpu, rsp);
			do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
		}
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int rcu_pm_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu)
{
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
			rcu_expedite_gp();
		break;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
			rcu_unexpedite_gp();
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

/*
 * Spawn the kthreads that handle each RCU flavor's grace periods.
 */
static int __init rcu_spawn_gp_kthread(void)
{
	unsigned long flags;
	int kthread_prio_in = kthread_prio;
	struct rcu_node *rnp;
	struct rcu_state *rsp;
	struct sched_param sp;
	struct task_struct *t;

	/* Force priority into range. */
	if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
		kthread_prio = 1;
	else if (kthread_prio < 0)
		kthread_prio = 0;
	else if (kthread_prio > 99)
		kthread_prio = 99;
	if (kthread_prio != kthread_prio_in)
		pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
			 kthread_prio, kthread_prio_in);

	rcu_scheduler_fully_active = 1;
	for_each_rcu_flavor(rsp) {
		t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
		BUG_ON(IS_ERR(t));
		rnp = rcu_get_root(rsp);
		raw_spin_lock_irqsave(&rnp->lock, flags);
		rsp->gp_kthread = t;
		if (kthread_prio) {
			sp.sched_priority = kthread_prio;
			sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
		}
		wake_up_process(t);
		raw_spin_unlock_irqrestore(&rnp->lock, flags);
	}
	rcu_spawn_nocb_kthreads();
	rcu_spawn_boost_kthreads();
	return 0;
}
early_initcall(rcu_spawn_gp_kthread);

/*
 * This function is invoked towards the end of the scheduler's initialization
 * process. Before this is called, the idle task might contain
 * RCU read-side critical sections (during which time, this idle
 * task is booting the system). After this function is called, the
 * idle tasks are prohibited from containing RCU read-side critical
 * sections. This function also enables RCU lockdep checking.
 */
void rcu_scheduler_starting(void)
{
	WARN_ON(num_online_cpus() != 1);
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
 */
static void __init rcu_init_levelspread(struct rcu_state *rsp)
{
	int i;

	if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT)) {
		rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			rsp->levelspread[i] = CONFIG_RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = rsp->levelcnt[i];
			rsp->levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}

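/*
 * Worked example of the balancing branch above (illustrative only,
 * with assumed numbers): with nr_cpu_ids = 96 and a two-level tree of
 * levelcnt = {1, 6}, the loop runs from the leaf level upward:
 *
 *	i = 1: ccur = 6, levelspread[1] = (96 + 6 - 1) / 6 = 16, cprv = 6
 *	i = 0: ccur = 1, levelspread[0] = (6 + 1 - 1) / 1 = 6
 *
 * so each of the six leaf rcu_node structures covers 16 CPUs and the
 * single root covers all six leaves.
 */
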
/*
 * Helper function for rcu_init() that initializes one rcu_state structure.
 */
static void __init rcu_init_one(struct rcu_state *rsp,
				struct rcu_data __percpu *rda)
{
	static const char * const buf[] = {
		"rcu_node_0",
		"rcu_node_1",
		"rcu_node_2",
		"rcu_node_3" }; /* Match MAX_RCU_LVLS */
	static const char * const fqs[] = {
		"rcu_node_fqs_0",
		"rcu_node_fqs_1",
		"rcu_node_fqs_2",
		"rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */
	static u8 fl_mask = 0x1;
	int cpustride = 1;
	int i;
	int j;
	struct rcu_node *rnp;

	BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */

	/* Silence gcc 4.8 warning about array index out of range. */
	if (rcu_num_lvls > RCU_NUM_LVLS)
		panic("rcu_init_one: rcu_num_lvls overflow");

	/* Initialize the level-tracking arrays. */
	for (i = 0; i < rcu_num_lvls; i++)
		rsp->levelcnt[i] = num_rcu_lvl[i];
	for (i = 1; i < rcu_num_lvls; i++)
		rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
	rcu_init_levelspread(rsp);
	rsp->flavor_mask = fl_mask;
	fl_mask <<= 1;

	/* Initialize the elements themselves, starting from the leaves. */
	for (i = rcu_num_lvls - 1; i >= 0; i--) {
		cpustride *= rsp->levelspread[i];
		rnp = rsp->level[i];
		for (j = 0; j < rsp->levelcnt[i]; j++, rnp++) {
			raw_spin_lock_init(&rnp->lock);
			lockdep_set_class_and_name(&rnp->lock,
						   &rcu_node_class[i], buf[i]);
			raw_spin_lock_init(&rnp->fqslock);
			lockdep_set_class_and_name(&rnp->fqslock,
						   &rcu_fqs_class[i], fqs[i]);
			rnp->gpnum = rsp->gpnum;
			rnp->completed = rsp->completed;
			rnp->qsmask = 0;
			rnp->qsmaskinit = 0;
			rnp->grplo = j * cpustride;
			rnp->grphi = (j + 1) * cpustride - 1;
			if (rnp->grphi >= nr_cpu_ids)
				rnp->grphi = nr_cpu_ids - 1;
			if (i == 0) {
				rnp->grpnum = 0;
				rnp->grpmask = 0;
				rnp->parent = NULL;
			} else {
				rnp->grpnum = j % rsp->levelspread[i - 1];
				rnp->grpmask = 1UL << rnp->grpnum;
				rnp->parent = rsp->level[i - 1] +
					      j / rsp->levelspread[i - 1];
			}
			rnp->level = i;
			INIT_LIST_HEAD(&rnp->blkd_tasks);
			rcu_init_one_nocb(rnp);
		}
	}

	init_waitqueue_head(&rsp->gp_wq);
	rnp = rsp->level[rcu_num_lvls - 1];
	for_each_possible_cpu(i) {
		while (i > rnp->grphi)
			rnp++;
		per_cpu_ptr(rsp->rda, i)->mynode = rnp;
		rcu_boot_init_percpu_data(i, rsp);
	}
	list_add(&rsp->flavors, &rcu_struct_flavors);
}

/*
 * Compute the rcu_node tree geometry from kernel parameters. This cannot
 * replace the definitions in tree.h because those are needed to size
 * the ->node array in the rcu_state structure.
 */
static void __init rcu_init_geometry(void)
{
	ulong d;
	int i;
	int j;
	int n = nr_cpu_ids;
	int rcu_capacity[MAX_RCU_LVLS + 1];

	/*
	 * Initialize any unspecified boot parameters.
	 * The default values of jiffies_till_first_fqs and
	 * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
	 * value, which is a function of HZ, plus one for each
	 * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
	 */
	d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
	if (jiffies_till_first_fqs == ULONG_MAX)
		jiffies_till_first_fqs = d;
	if (jiffies_till_next_fqs == ULONG_MAX)
		jiffies_till_next_fqs = d;

	/* If the compile-time values are accurate, just leave. */
	if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
	    nr_cpu_ids == NR_CPUS)
		return;
	pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
		rcu_fanout_leaf, nr_cpu_ids);

	/*
	 * Compute the number of nodes that can be handled by an rcu_node
	 * tree with the given number of levels. Setting rcu_capacity[0]
	 * makes some of the arithmetic easier.
	 */
	rcu_capacity[0] = 1;
	rcu_capacity[1] = rcu_fanout_leaf;
	for (i = 2; i <= MAX_RCU_LVLS; i++)
		rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;

	/*
	 * The boot-time rcu_fanout_leaf parameter is only permitted
	 * to increase the leaf-level fanout, not decrease it. Of course,
	 * the leaf-level fanout cannot exceed the number of bits in
	 * the rcu_node masks. Finally, the tree must be able to accommodate
	 * the configured number of CPUs. Complain and fall back to the
	 * compile-time values if these limits are exceeded.
	 */
	if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
	    rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
	    n > rcu_capacity[MAX_RCU_LVLS]) {
		WARN_ON(1);
		return;
	}

	/* Calculate the number of rcu_nodes at each level of the tree. */
	for (i = 1; i <= MAX_RCU_LVLS; i++)
		if (n <= rcu_capacity[i]) {
			for (j = 0; j <= i; j++)
				num_rcu_lvl[j] =
					DIV_ROUND_UP(n, rcu_capacity[i - j]);
			rcu_num_lvls = i;
			for (j = i + 1; j <= MAX_RCU_LVLS; j++)
				num_rcu_lvl[j] = 0;
			break;
		}

	/* Calculate the total number of rcu_node structures. */
	rcu_num_nodes = 0;
	for (i = 0; i <= MAX_RCU_LVLS; i++)
		rcu_num_nodes += num_rcu_lvl[i];
	rcu_num_nodes -= n;
}

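/*
 * Worked example of the geometry computation above (illustrative only,
 * with assumed numbers): with n = nr_cpu_ids = 20, rcu_fanout_leaf = 16,
 * and CONFIG_RCU_FANOUT = 16, rcu_capacity[] begins {1, 16, 256, ...}.
 * The smallest i with n <= rcu_capacity[i] is i = 2, giving:
 *
 *	num_rcu_lvl[0] = DIV_ROUND_UP(20, 256) = 1	(root)
 *	num_rcu_lvl[1] = DIV_ROUND_UP(20, 16)  = 2	(leaves)
 *	num_rcu_lvl[2] = DIV_ROUND_UP(20, 1)   = 20	(CPU slots)
 *
 * so rcu_num_lvls = 2 and, after subtracting the n per-CPU slots,
 * rcu_num_nodes = 1 + 2 = 3 rcu_node structures.
 */
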
void __init rcu_init(void)
{
	int cpu;

	rcu_early_boot_tests();
	rcu_bootup_announce();
	rcu_init_geometry();
	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
	__rcu_init_preempt();
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);

	/*
	 * We don't need protection against CPU-hotplug here because
	 * this is called early in boot, before either interrupts
	 * or the scheduler are operational.
	 */
	cpu_notifier(rcu_cpu_notify, 0);
	pm_notifier(rcu_pm_notify, 0);
	for_each_online_cpu(cpu)
		rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
}

#include "tree_plugin.h"