tree.c

/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *	    Manfred Spraul <manfred@colorfullife.com>
 *	    Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical version
 *
 * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/nmi.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/prefetch.h>
#include <linux/delay.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
#include <linux/ftrace.h>

#include "tree.h"
#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcutree."

/* Data structures. */

/*
 * In order to export the rcu_state name to the tracing tools, it
 * needs to be added in the __tracepoint_string section.
 * This requires defining a separate variable tp_<sname>_varname
 * that points to the string being used, and this will allow
 * the tracing userspace tools to be able to decipher the string
 * address to the matching string.
 */
#ifdef CONFIG_TRACING
# define DEFINE_RCU_TPS(sname) \
static char sname##_varname[] = #sname; \
static const char *tp_##sname##_varname __used __tracepoint_string = sname##_varname;
# define RCU_STATE_NAME(sname) sname##_varname
#else
# define DEFINE_RCU_TPS(sname)
# define RCU_STATE_NAME(sname) __stringify(sname)
#endif

#define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
DEFINE_RCU_TPS(sname) \
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
struct rcu_state sname##_state = { \
	.level = { &sname##_state.node[0] }, \
	.rda = &sname##_data, \
	.call = cr, \
	.gp_state = RCU_GP_IDLE, \
	.gpnum = 0UL - 300UL, \
	.completed = 0UL - 300UL, \
	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
	.name = RCU_STATE_NAME(sname), \
	.abbr = sabbr, \
	.exp_mutex = __MUTEX_INITIALIZER(sname##_state.exp_mutex), \
	.exp_wake_mutex = __MUTEX_INITIALIZER(sname##_state.exp_wake_mutex), \
}

RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
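
/*
 * For reference, RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched)
 * above expands to roughly the following (some fields omitted):
 *
 *	static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, rcu_sched_data);
 *	struct rcu_state rcu_sched_state = {
 *		.level	= { &rcu_sched_state.node[0] },
 *		.rda	= &rcu_sched_data,
 *		.call	= call_rcu_sched,
 *		.name	= "rcu_sched",
 *		.abbr	= 's',
 *	};
 *
 * so each flavor gets its own rcu_state plus a per-CPU rcu_data array.
 */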

static struct rcu_state *const rcu_state_p;
LIST_HEAD(rcu_struct_flavors);

/* Dump rcu_node combining tree at boot to verify correct setup. */
static bool dump_tree;
module_param(dump_tree, bool, 0444);
/* Control rcu_node-tree auto-balancing at boot time. */
static bool rcu_fanout_exact;
module_param(rcu_fanout_exact, bool, 0444);
/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
module_param(rcu_fanout_leaf, int, 0444);
int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
/* Number of rcu_nodes at specified level. */
int num_rcu_lvl[] = NUM_RCU_LVL_INIT;
int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
/* panic() on RCU Stall sysctl. */
int sysctl_panic_on_rcu_stall __read_mostly;

/*
 * The rcu_scheduler_active variable is initialized to the value
 * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
 * the first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
 * RCU can assume that there is but one task, allowing RCU to (for example)
 * optimize synchronize_rcu() to a simple barrier(). When this variable
 * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
 * to detect real grace periods. This variable is also used to suppress
 * boot-time false positives from lockdep-RCU error checking. Finally, it
 * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
 * is fully initialized, including all of its kthreads having been spawned.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * The rcu_scheduler_fully_active variable transitions from zero to one
 * during the early_initcall() processing, which is after the scheduler
 * is capable of creating new tasks. So RCU processing (for example,
 * creating tasks for RCU priority boosting) must be delayed until after
 * rcu_scheduler_fully_active transitions from zero to one. We also
 * currently delay invocation of any RCU callbacks until after this point.
 *
 * It might later prove better for people registering RCU callbacks during
 * early boot to take responsibility for these callbacks, but one step at
 * a time.
 */
static int rcu_scheduler_fully_active __read_mostly;

static void rcu_init_new_rnp(struct rcu_node *rnp_leaf);
static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
static void invoke_rcu_core(void);
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
static void rcu_report_exp_rdp(struct rcu_state *rsp,
			       struct rcu_data *rdp, bool wake);
static void sync_sched_exp_online_cleanup(int cpu);

/* rcuc/rcub kthread realtime priority */
static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
module_param(kthread_prio, int, 0644);

/* Delay in jiffies for grace-period initialization delays, debug only. */
static int gp_preinit_delay;
module_param(gp_preinit_delay, int, 0444);
static int gp_init_delay;
module_param(gp_init_delay, int, 0444);
static int gp_cleanup_delay;
module_param(gp_cleanup_delay, int, 0444);

/*
 * Number of grace periods between delays, normalized by the duration of
 * the delay. The longer the delay, the more the grace periods between
 * each delay. The reason for this normalization is that it means that,
 * for non-zero delays, the overall slowdown of grace periods is constant
 * regardless of the duration of the delay. This arrangement balances
 * the need for long delays to increase some race probabilities with the
 * need for fast grace periods to increase other race probabilities.
 */
#define PER_RCU_NODE_PERIOD 3	/* Number of grace periods between delays. */
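
/*
 * Worked example of the normalization above (numbers illustrative only):
 * the grace-period slowdown code later in this file issues a delay of d
 * jiffies only once per some fixed multiple of (PER_RCU_NODE_PERIOD * d)
 * grace periods. A 10-jiffy delay therefore fires a tenth as often as a
 * 1-jiffy delay, so the average added latency per grace period is the
 * same in both cases, which is the "constant overall slowdown" described
 * above.
 */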

/*
 * Track the rcutorture test sequence number and the update version
 * number within a given test. The rcutorture_testseq is incremented
 * on every rcutorture module load and unload, so has an odd value
 * when a test is running. The rcutorture_vernum is set to zero
 * when rcutorture starts and is incremented on each rcutorture update.
 * These variables enable correlating rcutorture output with the
 * RCU tracing information.
 */
unsigned long rcutorture_testseq;
unsigned long rcutorture_vernum;

/*
 * Compute the mask of online CPUs for the specified rcu_node structure.
 * This will not be stable unless the rcu_node structure's ->lock is
 * held, but the bit corresponding to the current CPU will be stable
 * in most contexts.
 */
unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
{
	return READ_ONCE(rnp->qsmaskinitnext);
}

/*
 * Return true if an RCU grace period is in progress. The READ_ONCE()s
 * permit this function to be invoked without holding the root rcu_node
 * structure's ->lock, but of course results can be subject to change.
 */
static int rcu_gp_in_progress(struct rcu_state *rsp)
{
	return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
}

/*
 * Note a quiescent state. Because we do not need to know
 * how many quiescent states passed, just if there was at least
 * one since the start of the grace period, this just sets a flag.
 * The caller must have disabled preemption.
 */
void rcu_sched_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_sched_qs() invoked with preemption enabled!!!");
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
		return;
	trace_rcu_grace_period(TPS("rcu_sched"),
			       __this_cpu_read(rcu_sched_data.gpnum),
			       TPS("cpuqs"));
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
		return;
	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
	rcu_report_exp_rdp(&rcu_sched_state,
			   this_cpu_ptr(&rcu_sched_data), true);
}

void rcu_bh_qs(void)
{
	RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
	if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
		trace_rcu_grace_period(TPS("rcu_bh"),
				       __this_cpu_read(rcu_bh_data.gpnum),
				       TPS("cpuqs"));
		__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
	}
}

/*
 * Steal a bit from the bottom of ->dynticks for idle entry/exit
 * control. Initially this is for TLB flushing.
 */
#define RCU_DYNTICK_CTRL_MASK 0x1
#define RCU_DYNTICK_CTRL_CTR  (RCU_DYNTICK_CTRL_MASK + 1)
#ifndef rcu_eqs_special_exit
#define rcu_eqs_special_exit() do { } while (0)
#endif

static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
	.dynticks_nesting = 1,
	.dynticks_nmi_nesting = DYNTICK_IRQ_NONIDLE,
	.dynticks = ATOMIC_INIT(RCU_DYNTICK_CTRL_CTR),
};
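
/*
 * Concretely, with the definitions above, ->dynticks is laid out as
 *
 *	bit  0:  RCU_DYNTICK_CTRL_MASK, the special-action (TLB-flush) flag
 *	bits 1+: a counter bumped by RCU_DYNTICK_CTRL_CTR (0x2) on every
 *		 transition into or out of an extended quiescent state (EQS)
 *
 * so bit 1 acts as a parity bit: clear means the CPU is in an EQS, set
 * means it is not. The initial value RCU_DYNTICK_CTRL_CTR marks a freshly
 * booted CPU as non-idle. Example sequence: 0x2 (running) -> enter idle
 * -> 0x4 (EQS) -> exit idle -> 0x6 (running again).
 */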

/*
 * Record entry into an extended quiescent state. This is only to be
 * called when not already in an extended quiescent state.
 */
static void rcu_dynticks_eqs_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior RCU read-side
	 * critical sections, and we also must force ordering with the
	 * next idle sojourn.
	 */
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
	/* Better be in an extended quiescent state! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_CTR));
	/* Better not have special action (TLB flush) pending! */
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     (seq & RCU_DYNTICK_CTRL_MASK));
}

/*
 * Record exit from an extended quiescent state. This is only to be
 * called from an extended quiescent state.
 */
static void rcu_dynticks_eqs_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int seq;

	/*
	 * CPUs seeing atomic_add_return() must see prior idle sojourns,
	 * and we also must force ordering with the next RCU read-side
	 * critical section.
	 */
	seq = atomic_add_return(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     !(seq & RCU_DYNTICK_CTRL_CTR));
	if (seq & RCU_DYNTICK_CTRL_MASK) {
		atomic_andnot(RCU_DYNTICK_CTRL_MASK, &rdtp->dynticks);
		smp_mb__after_atomic(); /* _exit after clearing mask. */
		/* Prefer duplicate flushes to losing a flush. */
		rcu_eqs_special_exit();
	}
}

/*
 * Reset the current CPU's ->dynticks counter to indicate that the
 * newly onlined CPU is no longer in an extended quiescent state.
 * This will either leave the counter unchanged, or increment it
 * to the next non-quiescent value.
 *
 * The non-atomic test/increment sequence works because the upper bits
 * of the ->dynticks counter are manipulated only by the corresponding CPU,
 * or when the corresponding CPU is offline.
 */
static void rcu_dynticks_eqs_online(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	if (atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR)
		return;
	atomic_add(RCU_DYNTICK_CTRL_CTR, &rdtp->dynticks);
}

/*
 * Is the current CPU in an extended quiescent state?
 *
 * No ordering, as we are sampling CPU-local information.
 */
bool rcu_dynticks_curr_cpu_in_eqs(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	return !(atomic_read(&rdtp->dynticks) & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Snapshot the ->dynticks counter with full ordering so as to allow
 * stable comparison of this counter with past and future snapshots.
 */
int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
{
	int snap = atomic_add_return(0, &rdtp->dynticks);

	return snap & ~RCU_DYNTICK_CTRL_MASK;
}

/*
 * Return true if the snapshot returned from rcu_dynticks_snap()
 * indicates that RCU is in an extended quiescent state.
 */
static bool rcu_dynticks_in_eqs(int snap)
{
	return !(snap & RCU_DYNTICK_CTRL_CTR);
}

/*
 * Return true if the CPU corresponding to the specified rcu_dynticks
 * structure has spent some time in an extended quiescent state since
 * rcu_dynticks_snap() returned the specified snapshot.
 */
static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
{
	return snap != rcu_dynticks_snap(rdtp);
}
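
/*
 * Together, rcu_dynticks_snap(), rcu_dynticks_in_eqs(), and
 * rcu_dynticks_in_eqs_since() support a two-step pattern (sketch; the
 * real users are dyntick_save_progress_counter() and
 * rcu_implicit_dynticks_qs() below):
 *
 *	int snap = rcu_dynticks_snap(rdtp);	(at the first FQS scan)
 *	...
 *	if (rcu_dynticks_in_eqs(snap) ||
 *	    rcu_dynticks_in_eqs_since(rdtp, snap))
 *		(the CPU has been idle: credit it with a quiescent state)
 *
 * Any change in the masked counter implies that the CPU passed through
 * an extended quiescent state, because the counter advances only on EQS
 * entry and exit.
 */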

/*
 * Do a double-increment of the ->dynticks counter to emulate a
 * momentary idle-CPU quiescent state.
 */
static void rcu_dynticks_momentary_idle(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	int special = atomic_add_return(2 * RCU_DYNTICK_CTRL_CTR,
					&rdtp->dynticks);

	/* It is illegal to call this from idle state. */
	WARN_ON_ONCE(!(special & RCU_DYNTICK_CTRL_CTR));
}

/*
 * Set the special (bottom) bit of the specified CPU so that it
 * will take special action (such as flushing its TLB) on the
 * next exit from an extended quiescent state. Returns true if
 * the bit was successfully set, or false if the CPU was not in
 * an extended quiescent state.
 */
bool rcu_eqs_special_set(int cpu)
{
	int old;
	int new;
	struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);

	do {
		old = atomic_read(&rdtp->dynticks);
		if (old & RCU_DYNTICK_CTRL_CTR)
			return false;
		new = old | RCU_DYNTICK_CTRL_MASK;
	} while (atomic_cmpxchg(&rdtp->dynticks, old, new) != old);
	return true;
}
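
/*
 * Hypothetical usage sketch (cpu and flush_fn below are placeholders,
 * not callers in this file): a subsystem that wants a remote CPU to
 * flush its TLB could try the cheap path first and fall back to an IPI
 * only when the CPU is not idle:
 *
 *	if (!rcu_eqs_special_set(cpu))
 *		smp_call_function_single(cpu, flush_fn, NULL, 1);
 *
 * When rcu_eqs_special_set() returns true, the flush is deferred until
 * the CPU leaves its extended quiescent state and runs
 * rcu_eqs_special_exit().
 */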

/*
 * Let the RCU core know that this CPU has gone through the scheduler,
 * which is a quiescent state. This is called when the need for a
 * quiescent state is urgent, so we burn an atomic operation and full
 * memory barriers to let the RCU core know about it, regardless of what
 * this CPU might (or might not) do in the near future.
 *
 * We inform the RCU core by emulating a zero-duration dyntick-idle period.
 *
 * The caller must have disabled interrupts.
 */
static void rcu_momentary_dyntick_idle(void)
{
	raw_cpu_write(rcu_dynticks.rcu_need_heavy_qs, false);
	rcu_dynticks_momentary_idle();
}

/*
 * Note a context switch. This is a quiescent state for RCU-sched,
 * and requires special handling for preemptible RCU.
 * The caller must have disabled interrupts.
 */
void rcu_note_context_switch(bool preempt)
{
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	trace_rcu_utilization(TPS("Start context switch"));
	rcu_sched_qs();
	rcu_preempt_note_context_switch(preempt);
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs)))
		goto out;
	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs)))
		rcu_momentary_dyntick_idle();
	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
	if (!preempt)
		rcu_note_voluntary_context_switch_lite(current);
out:
	trace_rcu_utilization(TPS("End context switch"));
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
EXPORT_SYMBOL_GPL(rcu_note_context_switch);

/*
 * Register a quiescent state for all RCU flavors. If there is an
 * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
 * dyntick-idle quiescent state visible to other CPUs (but only for those
 * RCU flavors in desperate need of a quiescent state, which will normally
 * be none of them). Either way, do a lightweight quiescent state for
 * all RCU flavors.
 *
 * The barrier() calls are redundant in the common case when this is
 * called externally, but just in case this is called from within this
 * file.
 */
void rcu_all_qs(void)
{
	unsigned long flags;

	if (!raw_cpu_read(rcu_dynticks.rcu_urgent_qs))
		return;
	preempt_disable();
	/* Load rcu_urgent_qs before other flags. */
	if (!smp_load_acquire(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs))) {
		preempt_enable();
		return;
	}
	this_cpu_write(rcu_dynticks.rcu_urgent_qs, false);
	barrier(); /* Avoid RCU read-side critical sections leaking down. */
	if (unlikely(raw_cpu_read(rcu_dynticks.rcu_need_heavy_qs))) {
		local_irq_save(flags);
		rcu_momentary_dyntick_idle();
		local_irq_restore(flags);
	}
	if (unlikely(raw_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)))
		rcu_sched_qs();
	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
	barrier(); /* Avoid RCU read-side critical sections leaking up. */
	preempt_enable();
}
EXPORT_SYMBOL_GPL(rcu_all_qs);

#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch. */
static long blimit = DEFAULT_RCU_BLIMIT;
#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
static long qhimark = DEFAULT_RCU_QHIMARK;
#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
static long qlowmark = DEFAULT_RCU_QLOMARK;

module_param(blimit, long, 0444);
module_param(qhimark, long, 0444);
module_param(qlowmark, long, 0444);

static ulong jiffies_till_first_fqs = ULONG_MAX;
static ulong jiffies_till_next_fqs = ULONG_MAX;
static bool rcu_kick_kthreads;

module_param(jiffies_till_first_fqs, ulong, 0644);
module_param(jiffies_till_next_fqs, ulong, 0644);
module_param(rcu_kick_kthreads, bool, 0644);

/*
 * How long the grace period must be before we start recruiting
 * quiescent-state help from rcu_note_context_switch().
 */
static ulong jiffies_till_sched_qs = HZ / 10;
module_param(jiffies_till_sched_qs, ulong, 0444);

static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp));
static void force_quiescent_state(struct rcu_state *rsp);
static int rcu_pending(void);

/*
 * Return the number of RCU batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started(void)
{
	return rcu_state_p->gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started);

/*
 * Return the number of RCU-sched batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_sched(void)
{
	return rcu_sched_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_sched);

/*
 * Return the number of RCU BH batches started thus far for debug & stats.
 */
unsigned long rcu_batches_started_bh(void)
{
	return rcu_bh_state.gpnum;
}
EXPORT_SYMBOL_GPL(rcu_batches_started_bh);

/*
 * Return the number of RCU batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed(void)
{
	return rcu_state_p->completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed);

/*
 * Return the number of RCU-sched batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_sched(void)
{
	return rcu_sched_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);

/*
 * Return the number of RCU BH batches completed thus far for debug & stats.
 */
unsigned long rcu_batches_completed_bh(void)
{
	return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);

/*
 * Return the number of RCU expedited batches completed thus far for
 * debug & stats. Odd numbers mean that a batch is in progress, even
 * numbers mean idle. The value returned will thus be roughly double
 * the cumulative batches since boot.
 */
unsigned long rcu_exp_batches_completed(void)
{
	return rcu_state_p->expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

/*
 * Return the number of RCU-sched expedited batches completed thus far
 * for debug & stats. Similar to rcu_exp_batches_completed().
 */
unsigned long rcu_exp_batches_completed_sched(void)
{
	return rcu_sched_state.expedited_sequence;
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);

/*
 * Force a quiescent state.
 */
void rcu_force_quiescent_state(void)
{
	force_quiescent_state(rcu_state_p);
}
EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

/*
 * Force a quiescent state for RCU BH.
 */
void rcu_bh_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

/*
 * Force a quiescent state for RCU-sched.
 */
void rcu_sched_force_quiescent_state(void)
{
	force_quiescent_state(&rcu_sched_state);
}
EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state);

/*
 * Show the state of the grace-period kthreads.
 */
void show_rcu_gp_kthreads(void)
{
	struct rcu_state *rsp;

	for_each_rcu_flavor(rsp) {
		pr_info("%s: wait state: %d ->state: %#lx\n",
			rsp->name, rsp->gp_state, rsp->gp_kthread->state);
		/* sched_show_task(rsp->gp_kthread); */
	}
}
EXPORT_SYMBOL_GPL(show_rcu_gp_kthreads);

/*
 * Record the number of times rcutorture tests have been initiated and
 * terminated. This information allows the debugfs tracing stats to be
 * correlated to the rcutorture messages, even when the rcutorture module
 * is being repeatedly loaded and unloaded. In other words, we cannot
 * store this state in rcutorture itself.
 */
void rcutorture_record_test_transition(void)
{
	rcutorture_testseq++;
	rcutorture_vernum = 0;
}
EXPORT_SYMBOL_GPL(rcutorture_record_test_transition);

/*
 * Send along grace-period-related data for rcutorture diagnostics.
 */
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed)
{
	struct rcu_state *rsp = NULL;

	switch (test_type) {
	case RCU_FLAVOR:
		rsp = rcu_state_p;
		break;
	case RCU_BH_FLAVOR:
		rsp = &rcu_bh_state;
		break;
	case RCU_SCHED_FLAVOR:
		rsp = &rcu_sched_state;
		break;
	default:
		break;
	}
	if (rsp == NULL)
		return;
	*flags = READ_ONCE(rsp->gp_flags);
	*gpnum = READ_ONCE(rsp->gpnum);
	*completed = READ_ONCE(rsp->completed);
}
EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);

/*
 * Record the number of writer passes through the current rcutorture test.
 * This is also used to correlate debugfs tracing stats with the rcutorture
 * messages.
 */
void rcutorture_record_progress(unsigned long vernum)
{
	rcutorture_vernum++;
}
EXPORT_SYMBOL_GPL(rcutorture_record_progress);

/*
 * Return the root node of the specified rcu_state structure.
 */
static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
{
	return &rsp->node[0];
}

/*
 * Enter an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to zero to allow for
 * the possibility of usermode upcalls having messed up our count
 * of interrupt nesting level during the prior busy period.
 */
static void rcu_eqs_enter(bool user)
{
	struct rcu_state *rsp;
	struct rcu_data *rdp;
	struct rcu_dynticks *rdtp;

	rdtp = this_cpu_ptr(&rcu_dynticks);
	WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
		     rdtp->dynticks_nesting == 0);
	if (rdtp->dynticks_nesting != 1) {
		rdtp->dynticks_nesting--;
		return;
	}

	lockdep_assert_irqs_disabled();
	trace_rcu_dyntick(TPS("Start"), rdtp->dynticks_nesting, 0, rdtp->dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	for_each_rcu_flavor(rsp) {
		rdp = this_cpu_ptr(rsp->rda);
		do_nocb_deferred_wakeup(rdp);
	}
	rcu_prepare_for_idle();
	WRITE_ONCE(rdtp->dynticks_nesting, 0); /* Avoid irq-access tearing. */
	rcu_dynticks_eqs_enter();
	rcu_dynticks_task_enter();
}

/**
 * rcu_idle_enter - inform RCU that current CPU is entering idle
 *
 * Enter idle mode, in other words, -leave- the mode in which RCU
 * read-side critical sections can occur. (Though RCU read-side
 * critical sections can occur in irq handlers in idle, a possibility
 * handled by irq_enter() and irq_exit().)
 *
 * If you add or remove a call to rcu_idle_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(false);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_enter - inform RCU that we are resuming userspace.
 *
 * Enter RCU idle mode right before resuming userspace. No use of RCU
 * is permitted between this call and rcu_user_exit(). This way the
 * CPU doesn't need to maintain the tick for RCU maintenance purposes
 * when the CPU runs in userspace.
 *
 * If you add or remove a call to rcu_user_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_user_enter(void)
{
	lockdep_assert_irqs_disabled();
	rcu_eqs_enter(true);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_exit - inform RCU of exit from NMI context
 *
 * If we are returning from the outermost NMI handler that interrupted an
 * RCU-idle period, update rdtp->dynticks and rdtp->dynticks_nmi_nesting
 * to let the RCU grace-period handling know that the CPU is back to
 * being RCU-idle.
 *
 * If you add or remove a call to rcu_nmi_exit(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_nmi_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	/*
	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
	 * (We are exiting an NMI handler, so RCU better be paying attention
	 * to us!)
	 */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

	/*
	 * If the nesting level is not 1, the CPU wasn't RCU-idle, so
	 * leave it in non-RCU-idle state.
	 */
	if (rdtp->dynticks_nmi_nesting != 1) {
		trace_rcu_dyntick(TPS("--="), rdtp->dynticks_nmi_nesting, rdtp->dynticks_nmi_nesting - 2, rdtp->dynticks);
		WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* No store tearing. */
			   rdtp->dynticks_nmi_nesting - 2);
		return;
	}

	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
	trace_rcu_dyntick(TPS("Startirq"), rdtp->dynticks_nmi_nesting, 0, rdtp->dynticks);
	WRITE_ONCE(rdtp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
	rcu_dynticks_eqs_enter();
}

/**
 * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle
 *
 * Exit from an interrupt handler, which might possibly result in entering
 * idle mode, in other words, leaving the mode in which read-side critical
 * sections can occur. The caller must have disabled interrupts.
 *
 * This code assumes that the idle loop never does anything that might
 * result in unbalanced calls to irq_enter() and irq_exit(). If your
 * architecture's idle loop violates this assumption, RCU will give you what
 * you deserve, good and hard. But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	lockdep_assert_irqs_disabled();
	if (rdtp->dynticks_nmi_nesting == 1)
		rcu_prepare_for_idle();
	rcu_nmi_exit();
	if (rdtp->dynticks_nmi_nesting == 0)
		rcu_dynticks_task_enter();
}

/*
 * Wrapper for rcu_irq_exit() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_exit_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_exit_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_exit();
	local_irq_restore(flags);
}

/*
 * Exit an RCU extended quiescent state, which can be either the
 * idle loop or adaptive-tickless usermode execution.
 *
 * We crowbar the ->dynticks_nmi_nesting field to DYNTICK_IRQ_NONIDLE to
 * allow for the possibility of usermode upcalls messing up our count of
 * interrupt nesting level during the busy period that is just now starting.
 */
static void rcu_eqs_exit(bool user)
{
	struct rcu_dynticks *rdtp;
	long oldval;

	lockdep_assert_irqs_disabled();
	rdtp = this_cpu_ptr(&rcu_dynticks);
	oldval = rdtp->dynticks_nesting;
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
	if (oldval) {
		rdtp->dynticks_nesting++;
		return;
	}
	rcu_dynticks_task_exit();
	rcu_dynticks_eqs_exit();
	rcu_cleanup_after_idle();
	trace_rcu_dyntick(TPS("End"), rdtp->dynticks_nesting, 1, rdtp->dynticks);
	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !user && !is_idle_task(current));
	WRITE_ONCE(rdtp->dynticks_nesting, 1);
	WRITE_ONCE(rdtp->dynticks_nmi_nesting, DYNTICK_IRQ_NONIDLE);
}

/**
 * rcu_idle_exit - inform RCU that current CPU is leaving idle
 *
 * Exit idle mode, in other words, -enter- the mode in which RCU
 * read-side critical sections can occur.
 *
 * If you add or remove a call to rcu_idle_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_idle_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_eqs_exit(false);
	local_irq_restore(flags);
}

#ifdef CONFIG_NO_HZ_FULL
/**
 * rcu_user_exit - inform RCU that we are exiting userspace.
 *
 * Exit RCU idle mode while entering the kernel because it can
 * run a RCU read side critical section anytime.
 *
 * If you add or remove a call to rcu_user_exit(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_user_exit(void)
{
	rcu_eqs_exit(1);
}
#endif /* CONFIG_NO_HZ_FULL */

/**
 * rcu_nmi_enter - inform RCU of entry to NMI context
 *
 * If the CPU was idle from RCU's viewpoint, update rdtp->dynticks and
 * rdtp->dynticks_nmi_nesting to let the RCU grace-period handling know
 * that the CPU is active. This implementation permits nested NMIs, as
 * long as the nesting level does not overflow an int. (You will probably
 * run out of stack space first.)
 *
 * If you add or remove a call to rcu_nmi_enter(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_nmi_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
	long incby = 2;

	/* Complain about underflow. */
	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting < 0);

	/*
	 * If idle from RCU viewpoint, atomically increment ->dynticks
	 * to mark non-idle and increment ->dynticks_nmi_nesting by one.
	 * Otherwise, increment ->dynticks_nmi_nesting by two. This means
	 * if ->dynticks_nmi_nesting is equal to one, we are guaranteed
	 * to be in the outermost NMI handler that interrupted an RCU-idle
	 * period (observation due to Andy Lutomirski).
	 */
	if (rcu_dynticks_curr_cpu_in_eqs()) {
		rcu_dynticks_eqs_exit();
		incby = 1;
	}
	trace_rcu_dyntick(incby == 1 ? TPS("Endirq") : TPS("++="),
			  rdtp->dynticks_nmi_nesting,
			  rdtp->dynticks_nmi_nesting + incby, rdtp->dynticks);
	WRITE_ONCE(rdtp->dynticks_nmi_nesting, /* Prevent store tearing. */
		   rdtp->dynticks_nmi_nesting + incby);
	barrier();
}
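
/*
 * Example nesting sequence for the scheme above: a CPU that is RCU-idle
 * has ->dynticks_nmi_nesting == 0. A first NMI (or an irq arriving via
 * rcu_irq_enter()) exits the EQS and sets the nesting count to 1; a
 * nested NMI arriving on top of that adds 2, giving 3; unwinding
 * subtracts 2 back to 1 and then drops the final 1 to 0 in
 * rcu_nmi_exit(), re-entering the EQS. Thus a value of exactly 1 always
 * identifies the outermost handler that interrupted an RCU-idle period.
 */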

/**
 * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle
 *
 * Enter an interrupt handler, which might possibly result in exiting
 * idle mode, in other words, entering the mode in which read-side critical
 * sections can occur. The caller must have disabled interrupts.
 *
 * Note that the Linux kernel is fully capable of entering an interrupt
 * handler that it never exits, for example when doing upcalls to user mode!
 * This code assumes that the idle loop never does upcalls to user mode.
 * If your architecture's idle loop does do upcalls to user mode (or does
 * anything else that results in unbalanced calls to the irq_enter() and
 * irq_exit() functions), RCU will give you what you deserve, good and hard.
 * But very infrequently and irreproducibly.
 *
 * Use things like work queues to work around this limitation.
 *
 * You have been warned.
 *
 * If you add or remove a call to rcu_irq_enter(), be sure to test with
 * CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter(void)
{
	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);

	lockdep_assert_irqs_disabled();
	if (rdtp->dynticks_nmi_nesting == 0)
		rcu_dynticks_task_exit();
	rcu_nmi_enter();
	if (rdtp->dynticks_nmi_nesting == 1)
		rcu_cleanup_after_idle();
}

/*
 * Wrapper for rcu_irq_enter() where interrupts are enabled.
 *
 * If you add or remove a call to rcu_irq_enter_irqson(), be sure to test
 * with CONFIG_RCU_EQS_DEBUG=y.
 */
void rcu_irq_enter_irqson(void)
{
	unsigned long flags;

	local_irq_save(flags);
	rcu_irq_enter();
	local_irq_restore(flags);
}

/**
 * rcu_is_watching - see if RCU thinks that the current CPU is not idle
 *
 * Return true if RCU is watching the running CPU, which means that this
 * CPU can safely enter RCU read-side critical sections. In other words,
 * if the current CPU is not in its idle loop or is in an interrupt or
 * NMI handler, return true.
 */
bool notrace rcu_is_watching(void)
{
	bool ret;

	preempt_disable_notrace();
	ret = !rcu_dynticks_curr_cpu_in_eqs();
	preempt_enable_notrace();
	return ret;
}
EXPORT_SYMBOL_GPL(rcu_is_watching);
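
/*
 * Hypothetical usage sketch (not a caller in this file): code that can
 * be reached from the idle loop or other extended-quiescent-state
 * contexts can guard its RCU read-side critical sections:
 *
 *	if (rcu_is_watching()) {
 *		rcu_read_lock();
 *		...
 *		rcu_read_unlock();
 *	}
 *
 * because lockdep-RCU will complain about RCU use on a CPU that RCU is
 * not watching.
 */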

/*
 * If a holdout task is actually running, request an urgent quiescent
 * state from its CPU. This is unsynchronized, so migrations can cause
 * the request to go to the wrong CPU. Which is OK, all that will happen
 * is that the CPU's next context switch will be a bit slower and next
 * time around this task will generate another request.
 */
void rcu_request_urgent_qs_task(struct task_struct *t)
{
	int cpu;

	barrier();
	cpu = task_cpu(t);
	if (!task_curr(t))
		return; /* This task is not running on that CPU. */
	smp_store_release(per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, cpu), true);
}

#if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU)

/*
 * Is the current CPU online? Disable preemption to avoid false positives
 * that could otherwise happen due to the current CPU number being sampled,
 * this task being preempted, its old CPU being taken offline, resuming
 * on some other CPU, then determining that its old CPU is now offline.
 * It is OK to use RCU on an offline processor during initial boot, hence
 * the check for rcu_scheduler_fully_active. Note also that it is OK
 * for a CPU coming online to use RCU for one jiffy prior to marking itself
 * online in the cpu_online_mask. Similarly, it is OK for a CPU going
 * offline to continue to use RCU for one jiffy after marking itself
 * offline in the cpu_online_mask. This leniency is necessary given the
 * non-atomic nature of the online and offline processing, for example,
 * the fact that a CPU enters the scheduler after completing the teardown
 * of the CPU.
 *
 * This is also why RCU internally marks CPUs online during the
 * preparation phase and offline after the CPU has been taken down.
 *
 * Disable checking if in an NMI handler because we cannot safely report
 * errors from NMI handlers anyway.
 */
  985. bool rcu_lockdep_current_cpu_online(void)
  986. {
  987. struct rcu_data *rdp;
  988. struct rcu_node *rnp;
  989. bool ret;
  990. if (in_nmi())
  991. return true;
  992. preempt_disable();
  993. rdp = this_cpu_ptr(&rcu_sched_data);
  994. rnp = rdp->mynode;
  995. ret = (rdp->grpmask & rcu_rnp_online_cpus(rnp)) ||
  996. !rcu_scheduler_fully_active;
  997. preempt_enable();
  998. return ret;
  999. }
  1000. EXPORT_SYMBOL_GPL(rcu_lockdep_current_cpu_online);
  1001. #endif /* #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) */
  1002. /**
  1003. * rcu_is_cpu_rrupt_from_idle - see if idle or immediately interrupted from idle
  1004. *
  1005. * If the current CPU is idle or running at a first-level (not nested)
  1006. * interrupt from idle, return true. The caller must have at least
  1007. * disabled preemption.
  1008. */
  1009. static int rcu_is_cpu_rrupt_from_idle(void)
  1010. {
  1011. return __this_cpu_read(rcu_dynticks.dynticks_nesting) <= 0 &&
  1012. __this_cpu_read(rcu_dynticks.dynticks_nmi_nesting) <= 1;
  1013. }
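/*
 * Rough illustration of the check above: when the scheduler-clock interrupt
 * is taken from the idle loop, ->dynticks_nesting is 0 (the CPU was in an
 * extended quiescent state) and ->dynticks_nmi_nesting is 1 (exactly one
 * level of interrupt), so the function returns true.  When the same
 * interrupt arrives while the CPU is running normal kernel code,
 * ->dynticks_nesting is nonzero and the function returns false.
 */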
  1014. /*
  1015. * We are reporting a quiescent state on behalf of some other CPU, so
  1016. * it is our responsibility to check for and handle potential overflow
  1017. * of the rcu_node ->gpnum counter with respect to the rcu_data counters.
1018. * After all, the CPU might be in a deep idle state, and thus executing no
  1019. * code whatsoever.
  1020. */
  1021. static void rcu_gpnum_ovf(struct rcu_node *rnp, struct rcu_data *rdp)
  1022. {
  1023. raw_lockdep_assert_held_rcu_node(rnp);
  1024. if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4, rnp->gpnum))
  1025. WRITE_ONCE(rdp->gpwrap, true);
  1026. if (ULONG_CMP_LT(rdp->rcu_iw_gpnum + ULONG_MAX / 4, rnp->gpnum))
  1027. rdp->rcu_iw_gpnum = rnp->gpnum + ULONG_MAX / 4;
  1028. }
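/*
 * Worked illustration of the overflow check above, scaled down to an 8-bit
 * counter for readability (so "ULONG_MAX" would be 255 and ULONG_MAX / 4
 * would be 63): if rdp->gpnum is still 10 but rnp->gpnum has advanced to 80,
 * then 10 + 63 = 73 compares as "before" 80, meaning that this CPU is more
 * than a quarter of the counter space behind, so ->gpwrap is set.  With real
 * unsigned longs the logic is identical, just with a vastly larger
 * quarter-range.
 */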
  1029. /*
  1030. * Snapshot the specified CPU's dynticks counter so that we can later
  1031. * credit them with an implicit quiescent state. Return 1 if this CPU
  1032. * is in dynticks idle mode, which is an extended quiescent state.
  1033. */
  1034. static int dyntick_save_progress_counter(struct rcu_data *rdp)
  1035. {
  1036. rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
  1037. if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
  1038. trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
  1039. rcu_gpnum_ovf(rdp->mynode, rdp);
  1040. return 1;
  1041. }
  1042. return 0;
  1043. }
  1044. /*
  1045. * Handler for the irq_work request posted when a grace period has
  1046. * gone on for too long, but not yet long enough for an RCU CPU
  1047. * stall warning. Set state appropriately, but just complain if
  1048. * there is unexpected state on entry.
  1049. */
  1050. static void rcu_iw_handler(struct irq_work *iwp)
  1051. {
  1052. struct rcu_data *rdp;
  1053. struct rcu_node *rnp;
  1054. rdp = container_of(iwp, struct rcu_data, rcu_iw);
  1055. rnp = rdp->mynode;
  1056. raw_spin_lock_rcu_node(rnp);
  1057. if (!WARN_ON_ONCE(!rdp->rcu_iw_pending)) {
  1058. rdp->rcu_iw_gpnum = rnp->gpnum;
  1059. rdp->rcu_iw_pending = false;
  1060. }
  1061. raw_spin_unlock_rcu_node(rnp);
  1062. }
  1063. /*
  1064. * Return true if the specified CPU has passed through a quiescent
1065. * state by virtue of being in or having passed through a dynticks
  1066. * idle state since the last call to dyntick_save_progress_counter()
  1067. * for this same CPU, or by virtue of having been offline.
  1068. */
  1069. static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
  1070. {
  1071. unsigned long jtsq;
  1072. bool *rnhqp;
  1073. bool *ruqp;
  1074. struct rcu_node *rnp = rdp->mynode;
  1075. /*
  1076. * If the CPU passed through or entered a dynticks idle phase with
  1077. * no active irq/NMI handlers, then we can safely pretend that the CPU
  1078. * already acknowledged the request to pass through a quiescent
  1079. * state. Either way, that CPU cannot possibly be in an RCU
  1080. * read-side critical section that started before the beginning
  1081. * of the current RCU grace period.
  1082. */
  1083. if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
  1084. trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
  1085. rdp->dynticks_fqs++;
  1086. rcu_gpnum_ovf(rnp, rdp);
  1087. return 1;
  1088. }
  1089. /*
  1090. * Has this CPU encountered a cond_resched() since the beginning
  1091. * of the grace period? For this to be the case, the CPU has to
  1092. * have noticed the current grace period. This might not be the
  1093. * case for nohz_full CPUs looping in the kernel.
  1094. */
  1095. jtsq = jiffies_till_sched_qs;
  1096. ruqp = per_cpu_ptr(&rcu_dynticks.rcu_urgent_qs, rdp->cpu);
  1097. if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
  1098. READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
  1099. READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
  1100. trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
  1101. rcu_gpnum_ovf(rnp, rdp);
  1102. return 1;
  1103. } else if (time_after(jiffies, rdp->rsp->gp_start + jtsq)) {
  1104. /* Load rcu_qs_ctr before store to rcu_urgent_qs. */
  1105. smp_store_release(ruqp, true);
  1106. }
  1107. /* Check for the CPU being offline. */
  1108. if (!(rdp->grpmask & rcu_rnp_online_cpus(rnp))) {
  1109. trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("ofl"));
  1110. rdp->offline_fqs++;
  1111. rcu_gpnum_ovf(rnp, rdp);
  1112. return 1;
  1113. }
  1114. /*
  1115. * A CPU running for an extended time within the kernel can
  1116. * delay RCU grace periods. When the CPU is in NO_HZ_FULL mode,
  1117. * even context-switching back and forth between a pair of
  1118. * in-kernel CPU-bound tasks cannot advance grace periods.
  1119. * So if the grace period is old enough, make the CPU pay attention.
  1120. * Note that the unsynchronized assignments to the per-CPU
  1121. * rcu_need_heavy_qs variable are safe. Yes, setting of
  1122. * bits can be lost, but they will be set again on the next
  1123. * force-quiescent-state pass. So lost bit sets do not result
  1124. * in incorrect behavior, merely in a grace period lasting
  1125. * a few jiffies longer than it might otherwise. Because
  1126. * there are at most four threads involved, and because the
1127. * updates occur only once every few jiffies, the probability of
  1128. * lossage (and thus of slight grace-period extension) is
  1129. * quite low.
  1130. */
  1131. rnhqp = &per_cpu(rcu_dynticks.rcu_need_heavy_qs, rdp->cpu);
  1132. if (!READ_ONCE(*rnhqp) &&
  1133. (time_after(jiffies, rdp->rsp->gp_start + jtsq) ||
  1134. time_after(jiffies, rdp->rsp->jiffies_resched))) {
  1135. WRITE_ONCE(*rnhqp, true);
  1136. /* Store rcu_need_heavy_qs before rcu_urgent_qs. */
  1137. smp_store_release(ruqp, true);
  1138. rdp->rsp->jiffies_resched += jtsq; /* Re-enable beating. */
  1139. }
  1140. /*
  1141. * If more than halfway to RCU CPU stall-warning time, do a
  1142. * resched_cpu() to try to loosen things up a bit. Also check to
  1143. * see if the CPU is getting hammered with interrupts, but only
  1144. * once per grace period, just to keep the IPIs down to a dull roar.
  1145. */
  1146. if (jiffies - rdp->rsp->gp_start > rcu_jiffies_till_stall_check() / 2) {
  1147. resched_cpu(rdp->cpu);
  1148. if (IS_ENABLED(CONFIG_IRQ_WORK) &&
  1149. !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum &&
  1150. (rnp->ffmask & rdp->grpmask)) {
  1151. init_irq_work(&rdp->rcu_iw, rcu_iw_handler);
  1152. rdp->rcu_iw_pending = true;
  1153. rdp->rcu_iw_gpnum = rnp->gpnum;
  1154. irq_work_queue_on(&rdp->rcu_iw, rdp->cpu);
  1155. }
  1156. }
  1157. return 0;
  1158. }
  1159. static void record_gp_stall_check_time(struct rcu_state *rsp)
  1160. {
  1161. unsigned long j = jiffies;
  1162. unsigned long j1;
  1163. rsp->gp_start = j;
  1164. smp_wmb(); /* Record start time before stall time. */
  1165. j1 = rcu_jiffies_till_stall_check();
  1166. WRITE_ONCE(rsp->jiffies_stall, j + j1);
  1167. rsp->jiffies_resched = j + j1 / 2;
  1168. rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
  1169. }
  1170. /*
  1171. * Convert a ->gp_state value to a character string.
  1172. */
  1173. static const char *gp_state_getname(short gs)
  1174. {
  1175. if (gs < 0 || gs >= ARRAY_SIZE(gp_state_names))
  1176. return "???";
  1177. return gp_state_names[gs];
  1178. }
  1179. /*
  1180. * Complain about starvation of grace-period kthread.
  1181. */
  1182. static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
  1183. {
  1184. unsigned long gpa;
  1185. unsigned long j;
  1186. j = jiffies;
  1187. gpa = READ_ONCE(rsp->gp_activity);
  1188. if (j - gpa > 2 * HZ) {
  1189. pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x %s(%d) ->state=%#lx ->cpu=%d\n",
  1190. rsp->name, j - gpa,
  1191. rsp->gpnum, rsp->completed,
  1192. rsp->gp_flags,
  1193. gp_state_getname(rsp->gp_state), rsp->gp_state,
  1194. rsp->gp_kthread ? rsp->gp_kthread->state : ~0,
  1195. rsp->gp_kthread ? task_cpu(rsp->gp_kthread) : -1);
  1196. if (rsp->gp_kthread) {
  1197. pr_err("RCU grace-period kthread stack dump:\n");
  1198. sched_show_task(rsp->gp_kthread);
  1199. wake_up_process(rsp->gp_kthread);
  1200. }
  1201. }
  1202. }
  1203. /*
  1204. * Dump stacks of all tasks running on stalled CPUs. First try using
  1205. * NMIs, but fall back to manual remote stack tracing on architectures
  1206. * that don't support NMI-based stack dumps. The NMI-triggered stack
  1207. * traces are more accurate because they are printed by the target CPU.
  1208. */
  1209. static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
  1210. {
  1211. int cpu;
  1212. unsigned long flags;
  1213. struct rcu_node *rnp;
  1214. rcu_for_each_leaf_node(rsp, rnp) {
  1215. raw_spin_lock_irqsave_rcu_node(rnp, flags);
  1216. for_each_leaf_node_possible_cpu(rnp, cpu)
  1217. if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu))
  1218. if (!trigger_single_cpu_backtrace(cpu))
  1219. dump_cpu_task(cpu);
  1220. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  1221. }
  1222. }
  1223. /*
  1224. * If too much time has passed in the current grace period, and if
  1225. * so configured, go kick the relevant kthreads.
  1226. */
  1227. static void rcu_stall_kick_kthreads(struct rcu_state *rsp)
  1228. {
  1229. unsigned long j;
  1230. if (!rcu_kick_kthreads)
  1231. return;
  1232. j = READ_ONCE(rsp->jiffies_kick_kthreads);
  1233. if (time_after(jiffies, j) && rsp->gp_kthread &&
  1234. (rcu_gp_in_progress(rsp) || READ_ONCE(rsp->gp_flags))) {
  1235. WARN_ONCE(1, "Kicking %s grace-period kthread\n", rsp->name);
  1236. rcu_ftrace_dump(DUMP_ALL);
  1237. wake_up_process(rsp->gp_kthread);
  1238. WRITE_ONCE(rsp->jiffies_kick_kthreads, j + HZ);
  1239. }
  1240. }
  1241. static inline void panic_on_rcu_stall(void)
  1242. {
  1243. if (sysctl_panic_on_rcu_stall)
  1244. panic("RCU Stall\n");
  1245. }
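/*
 * For reference, an illustrative administrator-side view of the knobs used
 * above: sysctl_panic_on_rcu_stall is exposed as the
 * kernel.panic_on_rcu_stall sysctl, and the stall warnings themselves can be
 * silenced with the rcupdate.rcu_cpu_stall_suppress boot/module parameter.
 * For example:
 *
 *	sysctl -w kernel.panic_on_rcu_stall=1
 *	echo 1 > /proc/sys/kernel/panic_on_rcu_stall
 */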
  1246. static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
  1247. {
  1248. int cpu;
  1249. long delta;
  1250. unsigned long flags;
  1251. unsigned long gpa;
  1252. unsigned long j;
  1253. int ndetected = 0;
  1254. struct rcu_node *rnp = rcu_get_root(rsp);
  1255. long totqlen = 0;
  1256. /* Kick and suppress, if so configured. */
  1257. rcu_stall_kick_kthreads(rsp);
  1258. if (rcu_cpu_stall_suppress)
  1259. return;
  1260. /* Only let one CPU complain about others per time interval. */
  1261. raw_spin_lock_irqsave_rcu_node(rnp, flags);
  1262. delta = jiffies - READ_ONCE(rsp->jiffies_stall);
  1263. if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
  1264. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  1265. return;
  1266. }
  1267. WRITE_ONCE(rsp->jiffies_stall,
  1268. jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
  1269. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  1270. /*
  1271. * OK, time to rat on our buddy...
  1272. * See Documentation/RCU/stallwarn.txt for info on how to debug
  1273. * RCU CPU stall warnings.
  1274. */
  1275. pr_err("INFO: %s detected stalls on CPUs/tasks:",
  1276. rsp->name);
  1277. print_cpu_stall_info_begin();
  1278. rcu_for_each_leaf_node(rsp, rnp) {
  1279. raw_spin_lock_irqsave_rcu_node(rnp, flags);
  1280. ndetected += rcu_print_task_stall(rnp);
  1281. if (rnp->qsmask != 0) {
  1282. for_each_leaf_node_possible_cpu(rnp, cpu)
  1283. if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
  1284. print_cpu_stall_info(rsp, cpu);
  1285. ndetected++;
  1286. }
  1287. }
  1288. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  1289. }
  1290. print_cpu_stall_info_end();
  1291. for_each_possible_cpu(cpu)
  1292. totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
  1293. cpu)->cblist);
  1294. pr_cont("(detected by %d, t=%ld jiffies, g=%ld, c=%ld, q=%lu)\n",
  1295. smp_processor_id(), (long)(jiffies - rsp->gp_start),
  1296. (long)rsp->gpnum, (long)rsp->completed, totqlen);
  1297. if (ndetected) {
  1298. rcu_dump_cpu_stacks(rsp);
  1299. /* Complain about tasks blocking the grace period. */
  1300. rcu_print_detail_task_stall(rsp);
  1301. } else {
  1302. if (READ_ONCE(rsp->gpnum) != gpnum ||
  1303. READ_ONCE(rsp->completed) == gpnum) {
  1304. pr_err("INFO: Stall ended before state dump start\n");
  1305. } else {
  1306. j = jiffies;
  1307. gpa = READ_ONCE(rsp->gp_activity);
  1308. pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
  1309. rsp->name, j - gpa, j, gpa,
  1310. jiffies_till_next_fqs,
  1311. rcu_get_root(rsp)->qsmask);
  1312. /* In this case, the current CPU might be at fault. */
  1313. sched_show_task(current);
  1314. }
  1315. }
  1316. rcu_check_gp_kthread_starvation(rsp);
  1317. panic_on_rcu_stall();
  1318. force_quiescent_state(rsp); /* Kick them all. */
  1319. }
  1320. static void print_cpu_stall(struct rcu_state *rsp)
  1321. {
  1322. int cpu;
  1323. unsigned long flags;
  1324. struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
  1325. struct rcu_node *rnp = rcu_get_root(rsp);
  1326. long totqlen = 0;
  1327. /* Kick and suppress, if so configured. */
  1328. rcu_stall_kick_kthreads(rsp);
  1329. if (rcu_cpu_stall_suppress)
  1330. return;
  1331. /*
  1332. * OK, time to rat on ourselves...
  1333. * See Documentation/RCU/stallwarn.txt for info on how to debug
  1334. * RCU CPU stall warnings.
  1335. */
  1336. pr_err("INFO: %s self-detected stall on CPU", rsp->name);
  1337. print_cpu_stall_info_begin();
  1338. raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
  1339. print_cpu_stall_info(rsp, smp_processor_id());
  1340. raw_spin_unlock_irqrestore_rcu_node(rdp->mynode, flags);
  1341. print_cpu_stall_info_end();
  1342. for_each_possible_cpu(cpu)
  1343. totqlen += rcu_segcblist_n_cbs(&per_cpu_ptr(rsp->rda,
  1344. cpu)->cblist);
  1345. pr_cont(" (t=%lu jiffies g=%ld c=%ld q=%lu)\n",
  1346. jiffies - rsp->gp_start,
  1347. (long)rsp->gpnum, (long)rsp->completed, totqlen);
  1348. rcu_check_gp_kthread_starvation(rsp);
  1349. rcu_dump_cpu_stacks(rsp);
  1350. raw_spin_lock_irqsave_rcu_node(rnp, flags);
  1351. if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
  1352. WRITE_ONCE(rsp->jiffies_stall,
  1353. jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
  1354. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  1355. panic_on_rcu_stall();
  1356. /*
  1357. * Attempt to revive the RCU machinery by forcing a context switch.
  1358. *
  1359. * A context switch would normally allow the RCU state machine to make
  1360. * progress and it could be we're stuck in kernel space without context
  1361. * switches for an entirely unreasonable amount of time.
  1362. */
  1363. resched_cpu(smp_processor_id());
  1364. }
  1365. static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
  1366. {
  1367. unsigned long completed;
  1368. unsigned long gpnum;
  1369. unsigned long gps;
  1370. unsigned long j;
  1371. unsigned long js;
  1372. struct rcu_node *rnp;
  1373. if ((rcu_cpu_stall_suppress && !rcu_kick_kthreads) ||
  1374. !rcu_gp_in_progress(rsp))
  1375. return;
  1376. rcu_stall_kick_kthreads(rsp);
  1377. j = jiffies;
  1378. /*
  1379. * Lots of memory barriers to reject false positives.
  1380. *
  1381. * The idea is to pick up rsp->gpnum, then rsp->jiffies_stall,
  1382. * then rsp->gp_start, and finally rsp->completed. These values
  1383. * are updated in the opposite order with memory barriers (or
  1384. * equivalent) during grace-period initialization and cleanup.
1385. * Now, a false positive can occur if we get a new value of
1386. * rsp->gp_start and an old value of rsp->jiffies_stall. But given
  1387. * the memory barriers, the only way that this can happen is if one
  1388. * grace period ends and another starts between these two fetches.
  1389. * Detect this by comparing rsp->completed with the previous fetch
  1390. * from rsp->gpnum.
  1391. *
  1392. * Given this check, comparisons of jiffies, rsp->jiffies_stall,
  1393. * and rsp->gp_start suffice to forestall false positives.
  1394. */
  1395. gpnum = READ_ONCE(rsp->gpnum);
  1396. smp_rmb(); /* Pick up ->gpnum first... */
  1397. js = READ_ONCE(rsp->jiffies_stall);
  1398. smp_rmb(); /* ...then ->jiffies_stall before the rest... */
  1399. gps = READ_ONCE(rsp->gp_start);
  1400. smp_rmb(); /* ...and finally ->gp_start before ->completed. */
  1401. completed = READ_ONCE(rsp->completed);
  1402. if (ULONG_CMP_GE(completed, gpnum) ||
  1403. ULONG_CMP_LT(j, js) ||
  1404. ULONG_CMP_GE(gps, js))
  1405. return; /* No stall or GP completed since entering function. */
  1406. rnp = rdp->mynode;
  1407. if (rcu_gp_in_progress(rsp) &&
  1408. (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
  1409. /* We haven't checked in, so go dump stack. */
  1410. print_cpu_stall(rsp);
  1411. } else if (rcu_gp_in_progress(rsp) &&
  1412. ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
  1413. /* They had a few time units to dump stack, so complain. */
  1414. print_other_cpu_stall(rsp, gpnum);
  1415. }
  1416. }
  1417. /**
  1418. * rcu_cpu_stall_reset - prevent further stall warnings in current grace period
  1419. *
  1420. * Set the stall-warning timeout way off into the future, thus preventing
  1421. * any RCU CPU stall-warning messages from appearing in the current set of
  1422. * RCU grace periods.
  1423. *
  1424. * The caller must disable hard irqs.
  1425. */
  1426. void rcu_cpu_stall_reset(void)
  1427. {
  1428. struct rcu_state *rsp;
  1429. for_each_rcu_flavor(rsp)
  1430. WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
  1431. }
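/*
 * For illustration, a sketch of the sort of caller this is intended for: a
 * kernel-debugger resume path that has held CPUs stopped for a long time
 * touches the various watchdogs, including this one, before letting the
 * system run again.  The companion call shown is an assumption about such a
 * path, not a requirement of rcu_cpu_stall_reset() itself.
 *
 *	local_irq_save(flags);
 *	touch_softlockup_watchdog_sync();	// assumed companion call
 *	rcu_cpu_stall_reset();
 *	local_irq_restore(flags);
 */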
  1432. /*
  1433. * Determine the value that ->completed will have at the end of the
  1434. * next subsequent grace period. This is used to tag callbacks so that
  1435. * a CPU can invoke callbacks in a timely fashion even if that CPU has
  1436. * been dyntick-idle for an extended period with callbacks under the
  1437. * influence of RCU_FAST_NO_HZ.
  1438. *
  1439. * The caller must hold rnp->lock with interrupts disabled.
  1440. */
  1441. static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
  1442. struct rcu_node *rnp)
  1443. {
  1444. raw_lockdep_assert_held_rcu_node(rnp);
  1445. /*
  1446. * If RCU is idle, we just wait for the next grace period.
  1447. * But we can only be sure that RCU is idle if we are looking
  1448. * at the root rcu_node structure -- otherwise, a new grace
  1449. * period might have started, but just not yet gotten around
  1450. * to initializing the current non-root rcu_node structure.
  1451. */
  1452. if (rcu_get_root(rsp) == rnp && rnp->gpnum == rnp->completed)
  1453. return rnp->completed + 1;
  1454. /*
  1455. * If the current rcu_node structure believes that RCU is
  1456. * idle, and if the rcu_state structure does not yet reflect
  1457. * the start of a new grace period, then the next grace period
  1458. * will suffice. The memory barrier is needed to accurately
  1459. * sample the rsp->gpnum, and pairs with the second lock
  1460. * acquisition in rcu_gp_init(), which is augmented with
  1461. * smp_mb__after_unlock_lock() for this purpose.
  1462. */
  1463. if (rnp->gpnum == rnp->completed) {
  1464. smp_mb(); /* See above block comment. */
  1465. if (READ_ONCE(rsp->gpnum) == rnp->completed)
  1466. return rnp->completed + 1;
  1467. }
  1468. /*
  1469. * Otherwise, wait for a possible partial grace period and
  1470. * then the subsequent full grace period.
  1471. */
  1472. return rnp->completed + 2;
  1473. }
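/*
 * Worked example of the logic above: suppose the root rcu_node shows
 * ->gpnum == ->completed == 100.  A callback queued now can be invoked once
 * grace period 101 completes, so the root-node case returns 101
 * (completed + 1).  The middle check lets a non-root rcu_node also return
 * 101 when rsp->gpnum confirms that no new grace period has started.
 * Otherwise, a non-root rcu_node that still shows 100/100 must allow for a
 * grace period 101 that might already be underway, so the conservative
 * answer is 102 (completed + 2): the possibly-in-progress grace period plus
 * one full one.
 */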
  1474. /* Trace-event wrapper function for trace_rcu_future_grace_period. */
  1475. static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
  1476. unsigned long c, const char *s)
  1477. {
  1478. trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
  1479. rnp->completed, c, rnp->level,
  1480. rnp->grplo, rnp->grphi, s);
  1481. }
  1482. /*
  1483. * Start the specified grace period, as needed to handle newly arrived
  1484. * callbacks. The required future grace periods are recorded in each
  1485. * rcu_node structure's ->need_future_gp[] field. Returns true if there
  1486. * is reason to awaken the grace-period kthread.
  1487. *
  1488. * The caller must hold the specified rcu_node structure's ->lock, which
  1489. * is why the caller is responsible for waking the grace-period kthread.
  1490. */
  1491. static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
  1492. unsigned long c)
  1493. {
  1494. bool ret = false;
  1495. struct rcu_state *rsp = rdp->rsp;
  1496. struct rcu_node *rnp_root;
  1497. /*
  1498. * Use funnel locking to either acquire the root rcu_node
  1499. * structure's lock or bail out if the need for this grace period
  1500. * has already been recorded -- or has already started. If there
  1501. * is already a grace period in progress in a non-leaf node, no
  1502. * recording is needed because the end of the grace period will
  1503. * scan the leaf rcu_node structures. Note that rnp->lock must
  1504. * not be released.
  1505. */
  1506. raw_lockdep_assert_held_rcu_node(rnp);
  1507. trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
  1508. for (rnp_root = rnp; 1; rnp_root = rnp_root->parent) {
  1509. if (rnp_root != rnp)
  1510. raw_spin_lock_rcu_node(rnp_root);
  1511. WARN_ON_ONCE(ULONG_CMP_LT(rnp_root->gpnum +
  1512. need_future_gp_mask(), c));
  1513. if (need_future_gp_element(rnp_root, c) ||
  1514. ULONG_CMP_GE(rnp_root->gpnum, c) ||
  1515. (rnp != rnp_root &&
  1516. rnp_root->gpnum != rnp_root->completed)) {
  1517. trace_rcu_this_gp(rnp_root, rdp, c, TPS("Prestarted"));
  1518. goto unlock_out;
  1519. }
  1520. need_future_gp_element(rnp_root, c) = true;
  1521. if (rnp_root != rnp && rnp_root->parent != NULL)
  1522. raw_spin_unlock_rcu_node(rnp_root);
  1523. if (!rnp_root->parent)
  1524. break; /* At root, and perhaps also leaf. */
  1525. }
  1526. /* If GP already in progress, just leave, otherwise start one. */
  1527. if (rnp_root->gpnum != rnp_root->completed) {
  1528. trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedleafroot"));
  1529. goto unlock_out;
  1530. }
  1531. trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedroot"));
  1532. WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
  1533. if (!rsp->gp_kthread) {
  1534. trace_rcu_this_gp(rnp_root, rdp, c, TPS("NoGPkthread"));
  1535. goto unlock_out;
  1536. }
  1537. trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum), TPS("newreq"));
  1538. ret = true; /* Caller must wake GP kthread. */
  1539. unlock_out:
  1540. if (rnp != rnp_root)
  1541. raw_spin_unlock_rcu_node(rnp_root);
  1542. return ret;
  1543. }
  1544. /*
  1545. * Clean up any old requests for the just-ended grace period. Also return
  1546. * whether any additional grace periods have been requested.
  1547. */
  1548. static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
  1549. {
  1550. unsigned long c = rnp->completed;
  1551. bool needmore;
  1552. struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
  1553. need_future_gp_element(rnp, c) = false;
  1554. needmore = need_any_future_gp(rnp);
  1555. trace_rcu_this_gp(rnp, rdp, c,
  1556. needmore ? TPS("CleanupMore") : TPS("Cleanup"));
  1557. return needmore;
  1558. }
  1559. /*
  1560. * Awaken the grace-period kthread for the specified flavor of RCU.
  1561. * Don't do a self-awaken, and don't bother awakening when there is
  1562. * nothing for the grace-period kthread to do (as in several CPUs
  1563. * raced to awaken, and we lost), and finally don't try to awaken
  1564. * a kthread that has not yet been created.
  1565. */
  1566. static void rcu_gp_kthread_wake(struct rcu_state *rsp)
  1567. {
  1568. if (current == rsp->gp_kthread ||
  1569. !READ_ONCE(rsp->gp_flags) ||
  1570. !rsp->gp_kthread)
  1571. return;
  1572. swake_up_one(&rsp->gp_wq);
  1573. }
  1574. /*
  1575. * If there is room, assign a ->completed number to any callbacks on
  1576. * this CPU that have not already been assigned. Also accelerate any
  1577. * callbacks that were previously assigned a ->completed number that has
  1578. * since proven to be too conservative, which can happen if callbacks get
  1579. * assigned a ->completed number while RCU is idle, but with reference to
  1580. * a non-root rcu_node structure. This function is idempotent, so it does
1581. * not hurt to call it repeatedly. Returns a flag indicating whether we
1582. * should awaken the RCU grace-period kthread.
  1583. *
  1584. * The caller must hold rnp->lock with interrupts disabled.
  1585. */
  1586. static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
  1587. struct rcu_data *rdp)
  1588. {
  1589. unsigned long c;
  1590. bool ret = false;
  1591. raw_lockdep_assert_held_rcu_node(rnp);
  1592. /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
  1593. if (!rcu_segcblist_pend_cbs(&rdp->cblist))
  1594. return false;
  1595. /*
  1596. * Callbacks are often registered with incomplete grace-period
  1597. * information. Something about the fact that getting exact
  1598. * information requires acquiring a global lock... RCU therefore
  1599. * makes a conservative estimate of the grace period number at which
  1600. * a given callback will become ready to invoke. The following
  1601. * code checks this estimate and improves it when possible, thus
  1602. * accelerating callback invocation to an earlier grace-period
  1603. * number.
  1604. */
  1605. c = rcu_cbs_completed(rsp, rnp);
  1606. if (rcu_segcblist_accelerate(&rdp->cblist, c))
  1607. ret = rcu_start_this_gp(rnp, rdp, c);
  1608. /* Trace depending on how much we were able to accelerate. */
  1609. if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
  1610. trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccWaitCB"));
  1611. else
  1612. trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("AccReadyCB"));
  1613. return ret;
  1614. }
  1615. /*
  1616. * Move any callbacks whose grace period has completed to the
  1617. * RCU_DONE_TAIL sublist, then compact the remaining sublists and
  1618. * assign ->completed numbers to any callbacks in the RCU_NEXT_TAIL
  1619. * sublist. This function is idempotent, so it does not hurt to
  1620. * invoke it repeatedly. As long as it is not invoked -too- often...
  1621. * Returns true if the RCU grace-period kthread needs to be awakened.
  1622. *
  1623. * The caller must hold rnp->lock with interrupts disabled.
  1624. */
  1625. static bool rcu_advance_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
  1626. struct rcu_data *rdp)
  1627. {
  1628. raw_lockdep_assert_held_rcu_node(rnp);
  1629. /* If no pending (not yet ready to invoke) callbacks, nothing to do. */
  1630. if (!rcu_segcblist_pend_cbs(&rdp->cblist))
  1631. return false;
  1632. /*
  1633. * Find all callbacks whose ->completed numbers indicate that they
  1634. * are ready to invoke, and put them into the RCU_DONE_TAIL sublist.
  1635. */
  1636. rcu_segcblist_advance(&rdp->cblist, rnp->completed);
  1637. /* Classify any remaining callbacks. */
  1638. return rcu_accelerate_cbs(rsp, rnp, rdp);
  1639. }
  1640. /*
  1641. * Update CPU-local rcu_data state to record the beginnings and ends of
  1642. * grace periods. The caller must hold the ->lock of the leaf rcu_node
  1643. * structure corresponding to the current CPU, and must have irqs disabled.
  1644. * Returns true if the grace-period kthread needs to be awakened.
  1645. */
  1646. static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
  1647. struct rcu_data *rdp)
  1648. {
  1649. bool ret;
  1650. bool need_gp;
  1651. raw_lockdep_assert_held_rcu_node(rnp);
  1652. /* Handle the ends of any preceding grace periods first. */
  1653. if (rdp->completed == rnp->completed &&
  1654. !unlikely(READ_ONCE(rdp->gpwrap))) {
  1655. /* No grace period end, so just accelerate recent callbacks. */
  1656. ret = rcu_accelerate_cbs(rsp, rnp, rdp);
  1657. } else {
  1658. /* Advance callbacks. */
  1659. ret = rcu_advance_cbs(rsp, rnp, rdp);
  1660. /* Remember that we saw this grace-period completion. */
  1661. rdp->completed = rnp->completed;
  1662. trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
  1663. }
  1664. if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
  1665. /*
  1666. * If the current grace period is waiting for this CPU,
  1667. * set up to detect a quiescent state, otherwise don't
  1668. * go looking for one.
  1669. */
  1670. rdp->gpnum = rnp->gpnum;
  1671. trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
  1672. need_gp = !!(rnp->qsmask & rdp->grpmask);
  1673. rdp->cpu_no_qs.b.norm = need_gp;
  1674. rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
  1675. rdp->core_needs_qs = need_gp;
  1676. zero_cpu_stall_ticks(rdp);
  1677. WRITE_ONCE(rdp->gpwrap, false);
  1678. rcu_gpnum_ovf(rnp, rdp);
  1679. }
  1680. return ret;
  1681. }
  1682. static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
  1683. {
  1684. unsigned long flags;
  1685. bool needwake;
  1686. struct rcu_node *rnp;
  1687. local_irq_save(flags);
  1688. rnp = rdp->mynode;
  1689. if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
  1690. rdp->completed == READ_ONCE(rnp->completed) &&
  1691. !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
  1692. !raw_spin_trylock_rcu_node(rnp)) { /* irqs already off, so later. */
  1693. local_irq_restore(flags);
  1694. return;
  1695. }
  1696. needwake = __note_gp_changes(rsp, rnp, rdp);
  1697. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  1698. if (needwake)
  1699. rcu_gp_kthread_wake(rsp);
  1700. }
  1701. static void rcu_gp_slow(struct rcu_state *rsp, int delay)
  1702. {
  1703. if (delay > 0 &&
  1704. !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
  1705. schedule_timeout_uninterruptible(delay);
  1706. }
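/*
 * For reference, an illustrative debugging setup: the delays consumed here
 * come from the gp_preinit_delay, gp_init_delay, and gp_cleanup_delay module
 * parameters, so (assuming the usual rcutree. prefix for this file's
 * parameters) a test kernel can be deliberately slowed down with boot
 * arguments such as:
 *
 *	rcutree.gp_preinit_delay=3 rcutree.gp_init_delay=3 rcutree.gp_cleanup_delay=3
 */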
  1707. /*
  1708. * Initialize a new grace period. Return false if no grace period required.
  1709. */
  1710. static bool rcu_gp_init(struct rcu_state *rsp)
  1711. {
  1712. unsigned long oldmask;
  1713. struct rcu_data *rdp;
  1714. struct rcu_node *rnp = rcu_get_root(rsp);
  1715. WRITE_ONCE(rsp->gp_activity, jiffies);
  1716. raw_spin_lock_irq_rcu_node(rnp);
  1717. if (!READ_ONCE(rsp->gp_flags)) {
  1718. /* Spurious wakeup, tell caller to go back to sleep. */
  1719. raw_spin_unlock_irq_rcu_node(rnp);
  1720. return false;
  1721. }
  1722. WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
  1723. if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
  1724. /*
  1725. * Grace period already in progress, don't start another.
  1726. * Not supposed to be able to happen.
  1727. */
  1728. raw_spin_unlock_irq_rcu_node(rnp);
  1729. return false;
  1730. }
  1731. /* Advance to a new grace period and initialize state. */
  1732. record_gp_stall_check_time(rsp);
  1733. /* Record GP times before starting GP, hence smp_store_release(). */
  1734. smp_store_release(&rsp->gpnum, rsp->gpnum + 1);
  1735. trace_rcu_grace_period(rsp->name, rsp->gpnum, TPS("start"));
  1736. raw_spin_unlock_irq_rcu_node(rnp);
  1737. /*
  1738. * Apply per-leaf buffered online and offline operations to the
  1739. * rcu_node tree. Note that this new grace period need not wait
  1740. * for subsequent online CPUs, and that quiescent-state forcing
  1741. * will handle subsequent offline CPUs.
  1742. */
  1743. rcu_for_each_leaf_node(rsp, rnp) {
  1744. rcu_gp_slow(rsp, gp_preinit_delay);
  1745. raw_spin_lock_irq_rcu_node(rnp);
  1746. if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
  1747. !rnp->wait_blkd_tasks) {
  1748. /* Nothing to do on this leaf rcu_node structure. */
  1749. raw_spin_unlock_irq_rcu_node(rnp);
  1750. continue;
  1751. }
  1752. /* Record old state, apply changes to ->qsmaskinit field. */
  1753. oldmask = rnp->qsmaskinit;
  1754. rnp->qsmaskinit = rnp->qsmaskinitnext;
  1755. /* If zero-ness of ->qsmaskinit changed, propagate up tree. */
  1756. if (!oldmask != !rnp->qsmaskinit) {
  1757. if (!oldmask) /* First online CPU for this rcu_node. */
  1758. rcu_init_new_rnp(rnp);
  1759. else if (rcu_preempt_has_tasks(rnp)) /* blocked tasks */
  1760. rnp->wait_blkd_tasks = true;
  1761. else /* Last offline CPU and can propagate. */
  1762. rcu_cleanup_dead_rnp(rnp);
  1763. }
  1764. /*
  1765. * If all waited-on tasks from prior grace period are
  1766. * done, and if all this rcu_node structure's CPUs are
  1767. * still offline, propagate up the rcu_node tree and
  1768. * clear ->wait_blkd_tasks. Otherwise, if one of this
  1769. * rcu_node structure's CPUs has since come back online,
  1770. * simply clear ->wait_blkd_tasks (but rcu_cleanup_dead_rnp()
  1771. * checks for this, so just call it unconditionally).
  1772. */
  1773. if (rnp->wait_blkd_tasks &&
  1774. (!rcu_preempt_has_tasks(rnp) ||
  1775. rnp->qsmaskinit)) {
  1776. rnp->wait_blkd_tasks = false;
  1777. rcu_cleanup_dead_rnp(rnp);
  1778. }
  1779. raw_spin_unlock_irq_rcu_node(rnp);
  1780. }
  1781. /*
  1782. * Set the quiescent-state-needed bits in all the rcu_node
  1783. * structures for all currently online CPUs in breadth-first order,
  1784. * starting from the root rcu_node structure, relying on the layout
  1785. * of the tree within the rsp->node[] array. Note that other CPUs
  1786. * will access only the leaves of the hierarchy, thus seeing that no
  1787. * grace period is in progress, at least until the corresponding
  1788. * leaf node has been initialized.
  1789. *
  1790. * The grace period cannot complete until the initialization
  1791. * process finishes, because this kthread handles both.
  1792. */
  1793. rcu_for_each_node_breadth_first(rsp, rnp) {
  1794. rcu_gp_slow(rsp, gp_init_delay);
  1795. raw_spin_lock_irq_rcu_node(rnp);
  1796. rdp = this_cpu_ptr(rsp->rda);
  1797. rcu_preempt_check_blocked_tasks(rnp);
  1798. rnp->qsmask = rnp->qsmaskinit;
  1799. WRITE_ONCE(rnp->gpnum, rsp->gpnum);
  1800. if (WARN_ON_ONCE(rnp->completed != rsp->completed))
  1801. WRITE_ONCE(rnp->completed, rsp->completed);
  1802. if (rnp == rdp->mynode)
  1803. (void)__note_gp_changes(rsp, rnp, rdp);
  1804. rcu_preempt_boost_start_gp(rnp);
  1805. trace_rcu_grace_period_init(rsp->name, rnp->gpnum,
  1806. rnp->level, rnp->grplo,
  1807. rnp->grphi, rnp->qsmask);
  1808. raw_spin_unlock_irq_rcu_node(rnp);
  1809. cond_resched_tasks_rcu_qs();
  1810. WRITE_ONCE(rsp->gp_activity, jiffies);
  1811. }
  1812. return true;
  1813. }
  1814. /*
  1815. * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
  1816. * time.
  1817. */
  1818. static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
  1819. {
  1820. struct rcu_node *rnp = rcu_get_root(rsp);
  1821. /* Someone like call_rcu() requested a force-quiescent-state scan. */
  1822. *gfp = READ_ONCE(rsp->gp_flags);
  1823. if (*gfp & RCU_GP_FLAG_FQS)
  1824. return true;
  1825. /* The current grace period has completed. */
  1826. if (!READ_ONCE(rnp->qsmask) && !rcu_preempt_blocked_readers_cgp(rnp))
  1827. return true;
  1828. return false;
  1829. }
  1830. /*
  1831. * Do one round of quiescent-state forcing.
  1832. */
  1833. static void rcu_gp_fqs(struct rcu_state *rsp, bool first_time)
  1834. {
  1835. struct rcu_node *rnp = rcu_get_root(rsp);
  1836. WRITE_ONCE(rsp->gp_activity, jiffies);
  1837. rsp->n_force_qs++;
  1838. if (first_time) {
  1839. /* Collect dyntick-idle snapshots. */
  1840. force_qs_rnp(rsp, dyntick_save_progress_counter);
  1841. } else {
  1842. /* Handle dyntick-idle and offline CPUs. */
  1843. force_qs_rnp(rsp, rcu_implicit_dynticks_qs);
  1844. }
  1845. /* Clear flag to prevent immediate re-entry. */
  1846. if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
  1847. raw_spin_lock_irq_rcu_node(rnp);
  1848. WRITE_ONCE(rsp->gp_flags,
  1849. READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
  1850. raw_spin_unlock_irq_rcu_node(rnp);
  1851. }
  1852. }
  1853. /*
  1854. * Clean up after the old grace period.
  1855. */
  1856. static void rcu_gp_cleanup(struct rcu_state *rsp)
  1857. {
  1858. unsigned long gp_duration;
  1859. bool needgp = false;
  1860. struct rcu_data *rdp;
  1861. struct rcu_node *rnp = rcu_get_root(rsp);
  1862. struct swait_queue_head *sq;
  1863. WRITE_ONCE(rsp->gp_activity, jiffies);
  1864. raw_spin_lock_irq_rcu_node(rnp);
  1865. gp_duration = jiffies - rsp->gp_start;
  1866. if (gp_duration > rsp->gp_max)
  1867. rsp->gp_max = gp_duration;
  1868. /*
  1869. * We know the grace period is complete, but to everyone else
  1870. * it appears to still be ongoing. But it is also the case
  1871. * that to everyone else it looks like there is nothing that
  1872. * they can do to advance the grace period. It is therefore
  1873. * safe for us to drop the lock in order to mark the grace
  1874. * period as completed in all of the rcu_node structures.
  1875. */
  1876. raw_spin_unlock_irq_rcu_node(rnp);
  1877. /*
  1878. * Propagate new ->completed value to rcu_node structures so
  1879. * that other CPUs don't have to wait until the start of the next
  1880. * grace period to process their callbacks. This also avoids
  1881. * some nasty RCU grace-period initialization races by forcing
  1882. * the end of the current grace period to be completely recorded in
  1883. * all of the rcu_node structures before the beginning of the next
  1884. * grace period is recorded in any of the rcu_node structures.
  1885. */
  1886. rcu_for_each_node_breadth_first(rsp, rnp) {
  1887. raw_spin_lock_irq_rcu_node(rnp);
  1888. WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
  1889. WARN_ON_ONCE(rnp->qsmask);
  1890. WRITE_ONCE(rnp->completed, rsp->gpnum);
  1891. rdp = this_cpu_ptr(rsp->rda);
  1892. if (rnp == rdp->mynode)
  1893. needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
  1894. /* smp_mb() provided by prior unlock-lock pair. */
  1895. needgp = rcu_future_gp_cleanup(rsp, rnp) || needgp;
  1896. sq = rcu_nocb_gp_get(rnp);
  1897. raw_spin_unlock_irq_rcu_node(rnp);
  1898. rcu_nocb_gp_cleanup(sq);
  1899. cond_resched_tasks_rcu_qs();
  1900. WRITE_ONCE(rsp->gp_activity, jiffies);
  1901. rcu_gp_slow(rsp, gp_cleanup_delay);
  1902. }
  1903. rnp = rcu_get_root(rsp);
  1904. raw_spin_lock_irq_rcu_node(rnp); /* Order GP before ->completed update. */
  1905. /* Declare grace period done. */
  1906. WRITE_ONCE(rsp->completed, rsp->gpnum);
  1907. trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
  1908. rsp->gp_state = RCU_GP_IDLE;
  1909. /* Check for GP requests since above loop. */
  1910. rdp = this_cpu_ptr(rsp->rda);
  1911. if (need_any_future_gp(rnp)) {
  1912. trace_rcu_this_gp(rnp, rdp, rsp->completed - 1,
  1913. TPS("CleanupMore"));
  1914. needgp = true;
  1915. }
  1916. /* Advance CBs to reduce false positives below. */
  1917. if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
  1918. WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
  1919. trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
  1920. TPS("newreq"));
  1921. }
  1922. WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
  1923. raw_spin_unlock_irq_rcu_node(rnp);
  1924. }
  1925. /*
  1926. * Body of kthread that handles grace periods.
  1927. */
  1928. static int __noreturn rcu_gp_kthread(void *arg)
  1929. {
  1930. bool first_gp_fqs;
  1931. int gf;
  1932. unsigned long j;
  1933. int ret;
  1934. struct rcu_state *rsp = arg;
  1935. struct rcu_node *rnp = rcu_get_root(rsp);
  1936. rcu_bind_gp_kthread();
  1937. for (;;) {
  1938. /* Handle grace-period start. */
  1939. for (;;) {
  1940. trace_rcu_grace_period(rsp->name,
  1941. READ_ONCE(rsp->gpnum),
  1942. TPS("reqwait"));
  1943. rsp->gp_state = RCU_GP_WAIT_GPS;
  1944. swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
  1945. RCU_GP_FLAG_INIT);
  1946. rsp->gp_state = RCU_GP_DONE_GPS;
  1947. /* Locking provides needed memory barrier. */
  1948. if (rcu_gp_init(rsp))
  1949. break;
  1950. cond_resched_tasks_rcu_qs();
  1951. WRITE_ONCE(rsp->gp_activity, jiffies);
  1952. WARN_ON(signal_pending(current));
  1953. trace_rcu_grace_period(rsp->name,
  1954. READ_ONCE(rsp->gpnum),
  1955. TPS("reqwaitsig"));
  1956. }
  1957. /* Handle quiescent-state forcing. */
  1958. first_gp_fqs = true;
  1959. j = jiffies_till_first_fqs;
  1960. if (j > HZ) {
  1961. j = HZ;
  1962. jiffies_till_first_fqs = HZ;
  1963. }
  1964. ret = 0;
  1965. for (;;) {
  1966. if (!ret) {
  1967. rsp->jiffies_force_qs = jiffies + j;
  1968. WRITE_ONCE(rsp->jiffies_kick_kthreads,
  1969. jiffies + 3 * j);
  1970. }
  1971. trace_rcu_grace_period(rsp->name,
  1972. READ_ONCE(rsp->gpnum),
  1973. TPS("fqswait"));
  1974. rsp->gp_state = RCU_GP_WAIT_FQS;
  1975. ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
  1976. rcu_gp_fqs_check_wake(rsp, &gf), j);
  1977. rsp->gp_state = RCU_GP_DOING_FQS;
  1978. /* Locking provides needed memory barriers. */
  1979. /* If grace period done, leave loop. */
  1980. if (!READ_ONCE(rnp->qsmask) &&
  1981. !rcu_preempt_blocked_readers_cgp(rnp))
  1982. break;
  1983. /* If time for quiescent-state forcing, do it. */
  1984. if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
  1985. (gf & RCU_GP_FLAG_FQS)) {
  1986. trace_rcu_grace_period(rsp->name,
  1987. READ_ONCE(rsp->gpnum),
  1988. TPS("fqsstart"));
  1989. rcu_gp_fqs(rsp, first_gp_fqs);
  1990. first_gp_fqs = false;
  1991. trace_rcu_grace_period(rsp->name,
  1992. READ_ONCE(rsp->gpnum),
  1993. TPS("fqsend"));
  1994. cond_resched_tasks_rcu_qs();
  1995. WRITE_ONCE(rsp->gp_activity, jiffies);
  1996. ret = 0; /* Force full wait till next FQS. */
  1997. j = jiffies_till_next_fqs;
  1998. if (j > HZ) {
  1999. j = HZ;
  2000. jiffies_till_next_fqs = HZ;
  2001. } else if (j < 1) {
  2002. j = 1;
  2003. jiffies_till_next_fqs = 1;
  2004. }
  2005. } else {
  2006. /* Deal with stray signal. */
  2007. cond_resched_tasks_rcu_qs();
  2008. WRITE_ONCE(rsp->gp_activity, jiffies);
  2009. WARN_ON(signal_pending(current));
  2010. trace_rcu_grace_period(rsp->name,
  2011. READ_ONCE(rsp->gpnum),
  2012. TPS("fqswaitsig"));
  2013. ret = 1; /* Keep old FQS timing. */
  2014. j = jiffies;
  2015. if (time_after(jiffies, rsp->jiffies_force_qs))
  2016. j = 1;
  2017. else
  2018. j = rsp->jiffies_force_qs - j;
  2019. }
  2020. }
  2021. /* Handle grace-period end. */
  2022. rsp->gp_state = RCU_GP_CLEANUP;
  2023. rcu_gp_cleanup(rsp);
  2024. rsp->gp_state = RCU_GP_CLEANED;
  2025. }
  2026. }
  2027. /*
  2028. * Report a full set of quiescent states to the specified rcu_state data
  2029. * structure. Invoke rcu_gp_kthread_wake() to awaken the grace-period
  2030. * kthread if another grace period is required. Whether we wake
  2031. * the grace-period kthread or it awakens itself for the next round
  2032. * of quiescent-state forcing, that kthread will clean up after the
  2033. * just-completed grace period. Note that the caller must hold rnp->lock,
  2034. * which is released before return.
  2035. */
  2036. static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
  2037. __releases(rcu_get_root(rsp)->lock)
  2038. {
  2039. raw_lockdep_assert_held_rcu_node(rcu_get_root(rsp));
  2040. WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
  2041. WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
  2042. raw_spin_unlock_irqrestore_rcu_node(rcu_get_root(rsp), flags);
  2043. rcu_gp_kthread_wake(rsp);
  2044. }
  2045. /*
  2046. * Similar to rcu_report_qs_rdp(), for which it is a helper function.
  2047. * Allows quiescent states for a group of CPUs to be reported at one go
  2048. * to the specified rcu_node structure, though all the CPUs in the group
  2049. * must be represented by the same rcu_node structure (which need not be a
  2050. * leaf rcu_node structure, though it often will be). The gps parameter
  2051. * is the grace-period snapshot, which means that the quiescent states
  2052. * are valid only if rnp->gpnum is equal to gps. That structure's lock
  2053. * must be held upon entry, and it is released before return.
  2054. */
  2055. static void
  2056. rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
  2057. struct rcu_node *rnp, unsigned long gps, unsigned long flags)
  2058. __releases(rnp->lock)
  2059. {
  2060. unsigned long oldmask = 0;
  2061. struct rcu_node *rnp_c;
  2062. raw_lockdep_assert_held_rcu_node(rnp);
  2063. /* Walk up the rcu_node hierarchy. */
  2064. for (;;) {
  2065. if (!(rnp->qsmask & mask) || rnp->gpnum != gps) {
  2066. /*
  2067. * Our bit has already been cleared, or the
  2068. * relevant grace period is already over, so done.
  2069. */
  2070. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  2071. return;
  2072. }
  2073. WARN_ON_ONCE(oldmask); /* Any child must be all zeroed! */
  2074. WARN_ON_ONCE(!rcu_is_leaf_node(rnp) &&
  2075. rcu_preempt_blocked_readers_cgp(rnp));
  2076. rnp->qsmask &= ~mask;
  2077. trace_rcu_quiescent_state_report(rsp->name, rnp->gpnum,
  2078. mask, rnp->qsmask, rnp->level,
  2079. rnp->grplo, rnp->grphi,
  2080. !!rnp->gp_tasks);
  2081. if (rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
  2082. /* Other bits still set at this level, so done. */
  2083. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  2084. return;
  2085. }
  2086. mask = rnp->grpmask;
  2087. if (rnp->parent == NULL) {
  2088. /* No more levels. Exit loop holding root lock. */
  2089. break;
  2090. }
  2091. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  2092. rnp_c = rnp;
  2093. rnp = rnp->parent;
  2094. raw_spin_lock_irqsave_rcu_node(rnp, flags);
  2095. oldmask = rnp_c->qsmask;
  2096. }
  2097. /*
  2098. * Get here if we are the last CPU to pass through a quiescent
  2099. * state for this grace period. Invoke rcu_report_qs_rsp()
  2100. * to clean up and start the next grace period if one is needed.
  2101. */
  2102. rcu_report_qs_rsp(rsp, flags); /* releases rnp->lock. */
  2103. }
  2104. /*
  2105. * Record a quiescent state for all tasks that were previously queued
  2106. * on the specified rcu_node structure and that were blocking the current
  2107. * RCU grace period. The caller must hold the specified rnp->lock with
  2108. * irqs disabled, and this lock is released upon return, but irqs remain
  2109. * disabled.
  2110. */
  2111. static void rcu_report_unblock_qs_rnp(struct rcu_state *rsp,
  2112. struct rcu_node *rnp, unsigned long flags)
  2113. __releases(rnp->lock)
  2114. {
  2115. unsigned long gps;
  2116. unsigned long mask;
  2117. struct rcu_node *rnp_p;
  2118. raw_lockdep_assert_held_rcu_node(rnp);
  2119. if (rcu_state_p == &rcu_sched_state || rsp != rcu_state_p ||
  2120. rnp->qsmask != 0 || rcu_preempt_blocked_readers_cgp(rnp)) {
  2121. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  2122. return; /* Still need more quiescent states! */
  2123. }
  2124. rnp_p = rnp->parent;
  2125. if (rnp_p == NULL) {
  2126. /*
  2127. * Only one rcu_node structure in the tree, so don't
  2128. * try to report up to its nonexistent parent!
  2129. */
  2130. rcu_report_qs_rsp(rsp, flags);
  2131. return;
  2132. }
  2133. /* Report up the rest of the hierarchy, tracking current ->gpnum. */
  2134. gps = rnp->gpnum;
  2135. mask = rnp->grpmask;
  2136. raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
  2137. raw_spin_lock_rcu_node(rnp_p); /* irqs already disabled. */
  2138. rcu_report_qs_rnp(mask, rsp, rnp_p, gps, flags);
  2139. }
  2140. /*
  2141. * Record a quiescent state for the specified CPU to that CPU's rcu_data
  2142. * structure. This must be called from the specified CPU.
  2143. */
  2144. static void
  2145. rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
  2146. {
  2147. unsigned long flags;
  2148. unsigned long mask;
  2149. bool needwake;
  2150. struct rcu_node *rnp;
  2151. rnp = rdp->mynode;
  2152. raw_spin_lock_irqsave_rcu_node(rnp, flags);
  2153. if (rdp->cpu_no_qs.b.norm || rdp->gpnum != rnp->gpnum ||
  2154. rnp->completed == rnp->gpnum || rdp->gpwrap) {
  2155. /*
  2156. * The grace period in which this quiescent state was
  2157. * recorded has ended, so don't report it upwards.
  2158. * We will instead need a new quiescent state that lies
  2159. * within the current grace period.
  2160. */
  2161. rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
  2162. rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
  2163. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  2164. return;
  2165. }
  2166. mask = rdp->grpmask;
  2167. if ((rnp->qsmask & mask) == 0) {
  2168. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  2169. } else {
  2170. rdp->core_needs_qs = false;
  2171. /*
2172. * This GP can't end until this CPU checks in, so all of our
  2173. * callbacks can be processed during the next GP.
  2174. */
  2175. needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
  2176. rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
  2177. /* ^^^ Released rnp->lock */
  2178. if (needwake)
  2179. rcu_gp_kthread_wake(rsp);
  2180. }
  2181. }
  2182. /*
  2183. * Check to see if there is a new grace period of which this CPU
  2184. * is not yet aware, and if so, set up local rcu_data state for it.
  2185. * Otherwise, see if this CPU has just passed through its first
  2186. * quiescent state for this grace period, and record that fact if so.
  2187. */
  2188. static void
  2189. rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
  2190. {
  2191. /* Check for grace-period ends and beginnings. */
  2192. note_gp_changes(rsp, rdp);
  2193. /*
  2194. * Does this CPU still need to do its part for current grace period?
  2195. * If no, return and let the other CPUs do their part as well.
  2196. */
  2197. if (!rdp->core_needs_qs)
  2198. return;
  2199. /*
  2200. * Was there a quiescent state since the beginning of the grace
  2201. * period? If no, then exit and wait for the next call.
  2202. */
  2203. if (rdp->cpu_no_qs.b.norm)
  2204. return;
  2205. /*
  2206. * Tell RCU we are done (but rcu_report_qs_rdp() will be the
  2207. * judge of that).
  2208. */
  2209. rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
  2210. }
  2211. /*
  2212. * Trace the fact that this CPU is going offline.
  2213. */
  2214. static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
  2215. {
  2216. RCU_TRACE(unsigned long mask;)
  2217. RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda);)
  2218. RCU_TRACE(struct rcu_node *rnp = rdp->mynode;)
  2219. if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
  2220. return;
  2221. RCU_TRACE(mask = rdp->grpmask;)
  2222. trace_rcu_grace_period(rsp->name,
  2223. rnp->gpnum + 1 - !!(rnp->qsmask & mask),
  2224. TPS("cpuofl"));
  2225. }
  2226. /*
  2227. * All CPUs for the specified rcu_node structure have gone offline,
  2228. * and all tasks that were preempted within an RCU read-side critical
  2229. * section while running on one of those CPUs have since exited their RCU
  2230. * read-side critical section. Some other CPU is reporting this fact with
  2231. * the specified rcu_node structure's ->lock held and interrupts disabled.
  2232. * This function therefore goes up the tree of rcu_node structures,
  2233. * clearing the corresponding bits in the ->qsmaskinit fields. Note that
  2234. * the leaf rcu_node structure's ->qsmaskinit field has already been
2235. * updated.
  2236. *
  2237. * This function does check that the specified rcu_node structure has
  2238. * all CPUs offline and no blocked tasks, so it is OK to invoke it
  2239. * prematurely. That said, invoking it after the fact will cost you
  2240. * a needless lock acquisition. So once it has done its work, don't
  2241. * invoke it again.
  2242. */
  2243. static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
  2244. {
  2245. long mask;
  2246. struct rcu_node *rnp = rnp_leaf;
  2247. raw_lockdep_assert_held_rcu_node(rnp);
  2248. if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
  2249. rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
  2250. return;
  2251. for (;;) {
  2252. mask = rnp->grpmask;
  2253. rnp = rnp->parent;
  2254. if (!rnp)
  2255. break;
  2256. raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
  2257. rnp->qsmaskinit &= ~mask;
  2258. rnp->qsmask &= ~mask;
  2259. if (rnp->qsmaskinit) {
  2260. raw_spin_unlock_rcu_node(rnp);
  2261. /* irqs remain disabled. */
  2262. return;
  2263. }
  2264. raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
  2265. }
  2266. }
  2267. /*
  2268. * The CPU has been completely removed, and some other CPU is reporting
  2269. * this fact from process context. Do the remainder of the cleanup.
  2270. * There can only be one CPU hotplug operation at a time, so no need for
  2271. * explicit locking.
  2272. */
  2273. static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
  2274. {
  2275. struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  2276. struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
  2277. if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
  2278. return;
  2279. /* Adjust any no-longer-needed kthreads. */
  2280. rcu_boost_kthread_setaffinity(rnp, -1);
  2281. }
  2282. /*
  2283. * Invoke any RCU callbacks that have made it to the end of their grace
2284. * period. Throttle as specified by rdp->blimit.
  2285. */
  2286. static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
  2287. {
  2288. unsigned long flags;
  2289. struct rcu_head *rhp;
  2290. struct rcu_cblist rcl = RCU_CBLIST_INITIALIZER(rcl);
  2291. long bl, count;
  2292. /* If no callbacks are ready, just return. */
  2293. if (!rcu_segcblist_ready_cbs(&rdp->cblist)) {
  2294. trace_rcu_batch_start(rsp->name,
  2295. rcu_segcblist_n_lazy_cbs(&rdp->cblist),
  2296. rcu_segcblist_n_cbs(&rdp->cblist), 0);
  2297. trace_rcu_batch_end(rsp->name, 0,
  2298. !rcu_segcblist_empty(&rdp->cblist),
  2299. need_resched(), is_idle_task(current),
  2300. rcu_is_callbacks_kthread());
  2301. return;
  2302. }
  2303. /*
  2304. * Extract the list of ready callbacks, disabling to prevent
  2305. * races with call_rcu() from interrupt handlers. Leave the
  2306. * callback counts, as rcu_barrier() needs to be conservative.
  2307. */
  2308. local_irq_save(flags);
  2309. WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
  2310. bl = rdp->blimit;
  2311. trace_rcu_batch_start(rsp->name, rcu_segcblist_n_lazy_cbs(&rdp->cblist),
  2312. rcu_segcblist_n_cbs(&rdp->cblist), bl);
  2313. rcu_segcblist_extract_done_cbs(&rdp->cblist, &rcl);
  2314. local_irq_restore(flags);
  2315. /* Invoke callbacks. */
  2316. rhp = rcu_cblist_dequeue(&rcl);
  2317. for (; rhp; rhp = rcu_cblist_dequeue(&rcl)) {
  2318. debug_rcu_head_unqueue(rhp);
  2319. if (__rcu_reclaim(rsp->name, rhp))
  2320. rcu_cblist_dequeued_lazy(&rcl);
  2321. /*
  2322. * Stop only if limit reached and CPU has something to do.
  2323. * Note: The rcl structure counts down from zero.
  2324. */
  2325. if (-rcl.len >= bl &&
  2326. (need_resched() ||
  2327. (!is_idle_task(current) && !rcu_is_callbacks_kthread())))
  2328. break;
  2329. }
  2330. local_irq_save(flags);
  2331. count = -rcl.len;
  2332. trace_rcu_batch_end(rsp->name, count, !!rcl.head, need_resched(),
  2333. is_idle_task(current), rcu_is_callbacks_kthread());
  2334. /* Update counts and requeue any remaining callbacks. */
  2335. rcu_segcblist_insert_done_cbs(&rdp->cblist, &rcl);
  2336. smp_mb(); /* List handling before counting for rcu_barrier(). */
  2337. rcu_segcblist_insert_count(&rdp->cblist, &rcl);
  2338. /* Reinstate batch limit if we have worked down the excess. */
  2339. count = rcu_segcblist_n_cbs(&rdp->cblist);
  2340. if (rdp->blimit == LONG_MAX && count <= qlowmark)
  2341. rdp->blimit = blimit;
  2342. /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
  2343. if (count == 0 && rdp->qlen_last_fqs_check != 0) {
  2344. rdp->qlen_last_fqs_check = 0;
  2345. rdp->n_force_qs_snap = rsp->n_force_qs;
  2346. } else if (count < rdp->qlen_last_fqs_check - qhimark)
  2347. rdp->qlen_last_fqs_check = count;
  2348. /*
  2349. * The following usually indicates a double call_rcu(). To track
  2350. * this down, try building with CONFIG_DEBUG_OBJECTS_RCU_HEAD=y.
  2351. */
  2352. WARN_ON_ONCE(rcu_segcblist_empty(&rdp->cblist) != (count == 0));
  2353. local_irq_restore(flags);
  2354. /* Re-invoke RCU core processing if there are callbacks remaining. */
  2355. if (rcu_segcblist_ready_cbs(&rdp->cblist))
  2356. invoke_rcu_core();
  2357. }
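/*
 * For reference, an illustrative tuning note: the batch limit and queue
 * thresholds used above (blimit, qhimark, qlowmark) are module parameters,
 * so (again assuming the usual rcutree. prefix) their effect on a workload
 * that floods call_rcu() can be explored with boot arguments such as:
 *
 *	rcutree.blimit=20 rcutree.qhimark=20000 rcutree.qlowmark=200
 */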
  2358. /*
  2359. * Check to see if this CPU is in a non-context-switch quiescent state
  2360. * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
  2361. * Also schedule RCU core processing.
  2362. *
  2363. * This function must be called from hardirq context. It is normally
  2364. * invoked from the scheduling-clock interrupt.
  2365. */
  2366. void rcu_check_callbacks(int user)
  2367. {
  2368. trace_rcu_utilization(TPS("Start scheduler-tick"));
  2369. increment_cpu_stall_ticks();
  2370. if (user || rcu_is_cpu_rrupt_from_idle()) {
  2371. /*
  2372. * Get here if this CPU took its interrupt from user
  2373. * mode or from the idle loop, and if this is not a
  2374. * nested interrupt. In this case, the CPU is in
  2375. * a quiescent state, so note it.
  2376. *
  2377. * No memory barrier is required here because both
  2378. * rcu_sched_qs() and rcu_bh_qs() reference only CPU-local
  2379. * variables that other CPUs neither access nor modify,
  2380. * at least not while the corresponding CPU is online.
  2381. */
  2382. rcu_sched_qs();
  2383. rcu_bh_qs();
  2384. } else if (!in_softirq()) {
  2385. /*
  2386. * Get here if this CPU did not take its interrupt from
  2387. * softirq, in other words, if it is not interrupting
  2388. * a rcu_bh read-side critical section. This is an _bh
  2389. * critical section, so note it.
  2390. */
  2391. rcu_bh_qs();
  2392. }
  2393. rcu_preempt_check_callbacks();
  2394. if (rcu_pending())
  2395. invoke_rcu_core();
  2396. if (user)
  2397. rcu_note_voluntary_context_switch(current);
  2398. trace_rcu_utilization(TPS("End scheduler-tick"));
  2399. }
  2400. /*
  2401. * Scan the leaf rcu_node structures, processing dyntick state for any that
  2402. * have not yet encountered a quiescent state, using the function specified.
  2403. * Also initiate boosting for any threads blocked on the root rcu_node.
  2404. *
  2405. * The caller must have suppressed start of new grace periods.
  2406. */
2407. static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rdp))
  2408. {
  2409. int cpu;
  2410. unsigned long flags;
  2411. unsigned long mask;
  2412. struct rcu_node *rnp;
  2413. rcu_for_each_leaf_node(rsp, rnp) {
  2414. cond_resched_tasks_rcu_qs();
  2415. mask = 0;
  2416. raw_spin_lock_irqsave_rcu_node(rnp, flags);
  2417. if (rnp->qsmask == 0) {
  2418. if (rcu_state_p == &rcu_sched_state ||
  2419. rsp != rcu_state_p ||
  2420. rcu_preempt_blocked_readers_cgp(rnp)) {
  2421. /*
  2422. * No point in scanning bits because they
  2423. * are all zero. But we might need to
  2424. * priority-boost blocked readers.
  2425. */
  2426. rcu_initiate_boost(rnp, flags);
  2427. /* rcu_initiate_boost() releases rnp->lock */
  2428. continue;
  2429. }
  2430. if (rnp->parent &&
  2431. (rnp->parent->qsmask & rnp->grpmask)) {
  2432. /*
  2433. * Race between grace-period
  2434. * initialization and task exiting RCU
  2435. * read-side critical section: Report.
  2436. */
  2437. rcu_report_unblock_qs_rnp(rsp, rnp, flags);
2438. /* rcu_report_unblock_qs_rnp() releases rnp->lock. */
  2439. continue;
  2440. }
  2441. }
  2442. for_each_leaf_node_possible_cpu(rnp, cpu) {
  2443. unsigned long bit = leaf_node_cpu_bit(rnp, cpu);
  2444. if ((rnp->qsmask & bit) != 0) {
  2445. if (f(per_cpu_ptr(rsp->rda, cpu)))
  2446. mask |= bit;
  2447. }
  2448. }
  2449. if (mask != 0) {
2450. /* Idle/offline CPUs, report (releases rnp->lock). */
  2451. rcu_report_qs_rnp(mask, rsp, rnp, rnp->gpnum, flags);
  2452. } else {
  2453. /* Nothing to do here, so just drop the lock. */
  2454. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  2455. }
  2456. }
  2457. }
  2458. /*
  2459. * Force quiescent states on reluctant CPUs, and also detect which
  2460. * CPUs are in dyntick-idle mode.
  2461. */
  2462. static void force_quiescent_state(struct rcu_state *rsp)
  2463. {
  2464. unsigned long flags;
  2465. bool ret;
  2466. struct rcu_node *rnp;
  2467. struct rcu_node *rnp_old = NULL;
  2468. /* Funnel through hierarchy to reduce memory contention. */
  2469. rnp = __this_cpu_read(rsp->rda->mynode);
  2470. for (; rnp != NULL; rnp = rnp->parent) {
  2471. ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
  2472. !raw_spin_trylock(&rnp->fqslock);
  2473. if (rnp_old != NULL)
  2474. raw_spin_unlock(&rnp_old->fqslock);
  2475. if (ret)
  2476. return;
  2477. rnp_old = rnp;
  2478. }
  2479. /* rnp_old == rcu_get_root(rsp), rnp == NULL. */
  2480. /* Reached the root of the rcu_node tree, acquire lock. */
  2481. raw_spin_lock_irqsave_rcu_node(rnp_old, flags);
  2482. raw_spin_unlock(&rnp_old->fqslock);
  2483. if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
  2484. raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
  2485. return; /* Someone beat us to it. */
  2486. }
  2487. WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
  2488. raw_spin_unlock_irqrestore_rcu_node(rnp_old, flags);
  2489. rcu_gp_kthread_wake(rsp);
  2490. }
  2491. /*
  2492. * This does the RCU core processing work for the specified rcu_state
  2493. * and rcu_data structures. This may be called only from the CPU to
2494. * which the rdp belongs.
  2495. */
  2496. static void
  2497. __rcu_process_callbacks(struct rcu_state *rsp)
  2498. {
  2499. unsigned long flags;
  2500. bool needwake;
  2501. struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
  2502. struct rcu_node *rnp;
  2503. WARN_ON_ONCE(!rdp->beenonline);
  2504. /* Update RCU state based on any recent quiescent states. */
  2505. rcu_check_quiescent_state(rsp, rdp);
  2506. /* No grace period and unregistered callbacks? */
  2507. if (!rcu_gp_in_progress(rsp) &&
  2508. rcu_segcblist_is_enabled(&rdp->cblist)) {
  2509. local_irq_save(flags);
  2510. if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) {
  2511. local_irq_restore(flags);
  2512. } else {
  2513. rnp = rdp->mynode;
  2514. raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
  2515. needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
  2516. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  2517. if (needwake)
  2518. rcu_gp_kthread_wake(rsp);
  2519. }
  2520. }
  2521. /* If there are callbacks ready, invoke them. */
  2522. if (rcu_segcblist_ready_cbs(&rdp->cblist))
  2523. invoke_rcu_callbacks(rsp, rdp);
  2524. /* Do any needed deferred wakeups of rcuo kthreads. */
  2525. do_nocb_deferred_wakeup(rdp);
  2526. }
  2527. /*
  2528. * Do RCU core processing for the current CPU.
  2529. */
  2530. static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
  2531. {
  2532. struct rcu_state *rsp;
  2533. if (cpu_is_offline(smp_processor_id()))
  2534. return;
  2535. trace_rcu_utilization(TPS("Start RCU core"));
  2536. for_each_rcu_flavor(rsp)
  2537. __rcu_process_callbacks(rsp);
  2538. trace_rcu_utilization(TPS("End RCU core"));
  2539. }
  2540. /*
  2541. * Schedule RCU callback invocation. If the specified type of RCU
  2542. * does not support RCU priority boosting, just do a direct call,
  2543. * otherwise wake up the per-CPU kernel kthread. Note that because we
  2544. * are running on the current CPU with softirqs disabled, the
  2545. * rcu_cpu_kthread_task cannot disappear out from under us.
  2546. */
  2547. static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
  2548. {
  2549. if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
  2550. return;
  2551. if (likely(!rsp->boost)) {
  2552. rcu_do_batch(rsp, rdp);
  2553. return;
  2554. }
  2555. invoke_rcu_callbacks_kthread();
  2556. }
  2557. static void invoke_rcu_core(void)
  2558. {
  2559. if (cpu_online(smp_processor_id()))
  2560. raise_softirq(RCU_SOFTIRQ);
  2561. }
  2562. /*
  2563. * Handle any core-RCU processing required by a call_rcu() invocation.
  2564. */
  2565. static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
  2566. struct rcu_head *head, unsigned long flags)
  2567. {
  2568. bool needwake;
  2569. /*
  2570. * If called from an extended quiescent state, invoke the RCU
  2571. * core in order to force a re-evaluation of RCU's idleness.
  2572. */
  2573. if (!rcu_is_watching())
  2574. invoke_rcu_core();
  2575. /* If interrupts were disabled or CPU offline, don't invoke RCU core. */
  2576. if (irqs_disabled_flags(flags) || cpu_is_offline(smp_processor_id()))
  2577. return;
  2578. /*
  2579. * Force the grace period if too many callbacks or too long waiting.
  2580. * Enforce hysteresis, and don't invoke force_quiescent_state()
  2581. * if some other CPU has recently done so. Also, don't bother
  2582. * invoking force_quiescent_state() if the newly enqueued callback
  2583. * is the only one waiting for a grace period to complete.
  2584. */
  2585. if (unlikely(rcu_segcblist_n_cbs(&rdp->cblist) >
  2586. rdp->qlen_last_fqs_check + qhimark)) {
  2587. /* Are we ignoring a completed grace period? */
  2588. note_gp_changes(rsp, rdp);
  2589. /* Start a new grace period if one not already started. */
  2590. if (!rcu_gp_in_progress(rsp)) {
  2591. struct rcu_node *rnp = rdp->mynode;
  2592. raw_spin_lock_rcu_node(rnp);
  2593. needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
  2594. raw_spin_unlock_rcu_node(rnp);
  2595. if (needwake)
  2596. rcu_gp_kthread_wake(rsp);
  2597. } else {
  2598. /* Give the grace period a kick. */
  2599. rdp->blimit = LONG_MAX;
  2600. if (rsp->n_force_qs == rdp->n_force_qs_snap &&
  2601. rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
  2602. force_quiescent_state(rsp);
  2603. rdp->n_force_qs_snap = rsp->n_force_qs;
  2604. rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
  2605. }
  2606. }
  2607. }
  2608. /*
  2609. * RCU callback function to leak a callback.
  2610. */
  2611. static void rcu_leak_callback(struct rcu_head *rhp)
  2612. {
  2613. }
  2614. /*
  2615. * Helper function for call_rcu() and friends. The cpu argument will
  2616. * normally be -1, indicating "currently running CPU". It may specify
  2617. * a CPU only if that CPU is a no-CBs CPU. Currently, only _rcu_barrier()
  2618. * is expected to specify a CPU.
  2619. */
  2620. static void
  2621. __call_rcu(struct rcu_head *head, rcu_callback_t func,
  2622. struct rcu_state *rsp, int cpu, bool lazy)
  2623. {
  2624. unsigned long flags;
  2625. struct rcu_data *rdp;
  2626. /* Misaligned rcu_head! */
  2627. WARN_ON_ONCE((unsigned long)head & (sizeof(void *) - 1));
  2628. if (debug_rcu_head_queue(head)) {
  2629. /*
  2630. * Probable double call_rcu(), so leak the callback.
  2631. * Use rcu:rcu_callback trace event to find the previous
  2632. * time callback was passed to __call_rcu().
  2633. */
  2634. WARN_ONCE(1, "__call_rcu(): Double-freed CB %p->%pF()!!!\n",
  2635. head, head->func);
  2636. WRITE_ONCE(head->func, rcu_leak_callback);
  2637. return;
  2638. }
  2639. head->func = func;
  2640. head->next = NULL;
  2641. local_irq_save(flags);
  2642. rdp = this_cpu_ptr(rsp->rda);
  2643. /* Add the callback to our list. */
  2644. if (unlikely(!rcu_segcblist_is_enabled(&rdp->cblist)) || cpu != -1) {
  2645. int offline;
  2646. if (cpu != -1)
  2647. rdp = per_cpu_ptr(rsp->rda, cpu);
  2648. if (likely(rdp->mynode)) {
  2649. /* Post-boot, so this should be for a no-CBs CPU. */
  2650. offline = !__call_rcu_nocb(rdp, head, lazy, flags);
  2651. WARN_ON_ONCE(offline);
2652. /* Offline CPU, __call_rcu() illegal, leak callback. */
  2653. local_irq_restore(flags);
  2654. return;
  2655. }
  2656. /*
  2657. * Very early boot, before rcu_init(). Initialize if needed
  2658. * and then drop through to queue the callback.
  2659. */
  2660. BUG_ON(cpu != -1);
  2661. WARN_ON_ONCE(!rcu_is_watching());
  2662. if (rcu_segcblist_empty(&rdp->cblist))
  2663. rcu_segcblist_init(&rdp->cblist);
  2664. }
  2665. rcu_segcblist_enqueue(&rdp->cblist, head, lazy);
  2666. if (!lazy)
  2667. rcu_idle_count_callbacks_posted();
  2668. if (__is_kfree_rcu_offset((unsigned long)func))
  2669. trace_rcu_kfree_callback(rsp->name, head, (unsigned long)func,
  2670. rcu_segcblist_n_lazy_cbs(&rdp->cblist),
  2671. rcu_segcblist_n_cbs(&rdp->cblist));
  2672. else
  2673. trace_rcu_callback(rsp->name, head,
  2674. rcu_segcblist_n_lazy_cbs(&rdp->cblist),
  2675. rcu_segcblist_n_cbs(&rdp->cblist));
  2676. /* Go handle any RCU core processing required. */
  2677. __call_rcu_core(rsp, rdp, head, flags);
  2678. local_irq_restore(flags);
  2679. }
  2680. /**
2681. * call_rcu_sched() - Queue an RCU callback for invocation after a sched grace period.
  2682. * @head: structure to be used for queueing the RCU updates.
  2683. * @func: actual callback function to be invoked after the grace period
  2684. *
  2685. * The callback function will be invoked some time after a full grace
  2686. * period elapses, in other words after all currently executing RCU
  2687. * read-side critical sections have completed. call_rcu_sched() assumes
  2688. * that the read-side critical sections end on enabling of preemption
  2689. * or on voluntary preemption.
  2690. * RCU read-side critical sections are delimited by:
  2691. *
  2692. * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR
  2693. * - anything that disables preemption.
  2694. *
  2695. * These may be nested.
  2696. *
  2697. * See the description of call_rcu() for more detailed information on
  2698. * memory ordering guarantees.
  2699. */
  2700. void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
  2701. {
  2702. __call_rcu(head, func, &rcu_sched_state, -1, 0);
  2703. }
  2704. EXPORT_SYMBOL_GPL(call_rcu_sched);
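/*
 * Illustrative sketch (editorial, not part of upstream tree.c): a
 * typical call_rcu_sched() caller embeds an rcu_head in the structure
 * being freed and reclaims it from the callback once the grace period
 * ends. "struct foo", foo_lock, foo_reclaim(), and foo_remove() are
 * invented for this example; call_rcu_sched(), container_of(),
 * list_del_rcu(), and kfree() are real kernel interfaces.
 */
#if 0
struct foo {
	struct list_head list;
	int data;
	struct rcu_head rcu;
};

static DEFINE_SPINLOCK(foo_lock);

static void foo_reclaim(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rcu);

	kfree(fp);	/* Safe: all pre-existing preempt-disabled readers are done. */
}

static void foo_remove(struct foo *fp)
{
	spin_lock(&foo_lock);
	list_del_rcu(&fp->list);	/* Unlink so new readers cannot find it... */
	spin_unlock(&foo_lock);
	call_rcu_sched(&fp->rcu, foo_reclaim);	/* ...then defer the actual free. */
}
#endif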
  2705. /**
2706. * call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
  2707. * @head: structure to be used for queueing the RCU updates.
  2708. * @func: actual callback function to be invoked after the grace period
  2709. *
  2710. * The callback function will be invoked some time after a full grace
  2711. * period elapses, in other words after all currently executing RCU
  2712. * read-side critical sections have completed. call_rcu_bh() assumes
  2713. * that the read-side critical sections end on completion of a softirq
  2714. * handler. This means that read-side critical sections in process
  2715. * context must not be interrupted by softirqs. This interface is to be
  2716. * used when most of the read-side critical sections are in softirq context.
  2717. * RCU read-side critical sections are delimited by:
  2718. *
  2719. * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR
  2720. * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
  2721. *
  2722. * These may be nested.
  2723. *
  2724. * See the description of call_rcu() for more detailed information on
  2725. * memory ordering guarantees.
  2726. */
  2727. void call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
  2728. {
  2729. __call_rcu(head, func, &rcu_bh_state, -1, 0);
  2730. }
  2731. EXPORT_SYMBOL_GPL(call_rcu_bh);
  2732. /*
  2733. * Queue an RCU callback for lazy invocation after a grace period.
  2734. * This will likely be later named something like "call_rcu_lazy()",
  2735. * but this change will require some way of tagging the lazy RCU
  2736. * callbacks in the list of pending callbacks. Until then, this
  2737. * function may only be called from __kfree_rcu().
  2738. */
  2739. void kfree_call_rcu(struct rcu_head *head,
  2740. rcu_callback_t func)
  2741. {
  2742. __call_rcu(head, func, rcu_state_p, -1, 1);
  2743. }
  2744. EXPORT_SYMBOL_GPL(kfree_call_rcu);
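/*
 * Illustrative sketch (editorial, not part of upstream tree.c):
 * callers do not invoke kfree_call_rcu() directly; they use the
 * kfree_rcu() macro, which encodes the rcu_head offset as the "func"
 * argument and reaches this function via __kfree_rcu(). "struct foo"
 * and foo_release() are invented for this example.
 */
#if 0
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_release(struct foo *fp)
{
	kfree_rcu(fp, rcu);	/* Lazily frees fp after a grace period. */
}
#endif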
  2745. /*
  2746. * Because a context switch is a grace period for RCU-sched and RCU-bh,
  2747. * any blocking grace-period wait automatically implies a grace period
2748. * if there is only one CPU online at any point in time during execution
  2749. * of either synchronize_sched() or synchronize_rcu_bh(). It is OK to
  2750. * occasionally incorrectly indicate that there are multiple CPUs online
  2751. * when there was in fact only one the whole time, as this just adds
  2752. * some overhead: RCU still operates correctly.
  2753. */
  2754. static inline int rcu_blocking_is_gp(void)
  2755. {
  2756. int ret;
  2757. might_sleep(); /* Check for RCU read-side critical section. */
  2758. preempt_disable();
  2759. ret = num_online_cpus() <= 1;
  2760. preempt_enable();
  2761. return ret;
  2762. }
  2763. /**
  2764. * synchronize_sched - wait until an rcu-sched grace period has elapsed.
  2765. *
  2766. * Control will return to the caller some time after a full rcu-sched
  2767. * grace period has elapsed, in other words after all currently executing
  2768. * rcu-sched read-side critical sections have completed. These read-side
  2769. * critical sections are delimited by rcu_read_lock_sched() and
  2770. * rcu_read_unlock_sched(), and may be nested. Note that preempt_disable(),
  2771. * local_irq_disable(), and so on may be used in place of
  2772. * rcu_read_lock_sched().
  2773. *
  2774. * This means that all preempt_disable code sequences, including NMI and
  2775. * non-threaded hardware-interrupt handlers, in progress on entry will
  2776. * have completed before this primitive returns. However, this does not
  2777. * guarantee that softirq handlers will have completed, since in some
  2778. * kernels, these handlers can run in process context, and can block.
  2779. *
  2780. * Note that this guarantee implies further memory-ordering guarantees.
  2781. * On systems with more than one CPU, when synchronize_sched() returns,
  2782. * each CPU is guaranteed to have executed a full memory barrier since the
  2783. * end of its last RCU-sched read-side critical section whose beginning
  2784. * preceded the call to synchronize_sched(). In addition, each CPU having
  2785. * an RCU read-side critical section that extends beyond the return from
  2786. * synchronize_sched() is guaranteed to have executed a full memory barrier
  2787. * after the beginning of synchronize_sched() and before the beginning of
  2788. * that RCU read-side critical section. Note that these guarantees include
  2789. * CPUs that are offline, idle, or executing in user mode, as well as CPUs
  2790. * that are executing in the kernel.
  2791. *
  2792. * Furthermore, if CPU A invoked synchronize_sched(), which returned
  2793. * to its caller on CPU B, then both CPU A and CPU B are guaranteed
  2794. * to have executed a full memory barrier during the execution of
  2795. * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
  2796. * again only if the system has more than one CPU).
  2797. */
  2798. void synchronize_sched(void)
  2799. {
  2800. RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
  2801. lock_is_held(&rcu_lock_map) ||
  2802. lock_is_held(&rcu_sched_lock_map),
  2803. "Illegal synchronize_sched() in RCU-sched read-side critical section");
  2804. if (rcu_blocking_is_gp())
  2805. return;
  2806. if (rcu_gp_is_expedited())
  2807. synchronize_sched_expedited();
  2808. else
  2809. wait_rcu_gp(call_rcu_sched);
  2810. }
  2811. EXPORT_SYMBOL_GPL(synchronize_sched);
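/*
 * Illustrative sketch (editorial, not part of upstream tree.c): an
 * updater that relies on synchronize_sched() to wait out readers that
 * run with preemption disabled. "struct foo", gp, foo_lock, reader(),
 * and update_foo() are invented for this example; the RCU primitives
 * used are real kernel interfaces.
 */
#if 0
struct foo {
	int data;
};

static struct foo __rcu *gp;
static DEFINE_SPINLOCK(foo_lock);

static int reader(void)
{
	struct foo *fp;
	int val = -1;

	rcu_read_lock_sched();			/* Disables preemption. */
	fp = rcu_dereference_sched(gp);
	if (fp)
		val = fp->data;
	rcu_read_unlock_sched();
	return val;
}

static void update_foo(struct foo *newp)
{
	struct foo *oldp;

	spin_lock(&foo_lock);
	oldp = rcu_dereference_protected(gp, lockdep_is_held(&foo_lock));
	rcu_assign_pointer(gp, newp);
	spin_unlock(&foo_lock);
	synchronize_sched();			/* Wait for all pre-existing readers. */
	kfree(oldp);				/* No reader can still hold a reference. */
}
#endif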
  2812. /**
  2813. * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
  2814. *
  2815. * Control will return to the caller some time after a full rcu_bh grace
  2816. * period has elapsed, in other words after all currently executing rcu_bh
  2817. * read-side critical sections have completed. RCU read-side critical
  2818. * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
  2819. * and may be nested.
  2820. *
  2821. * See the description of synchronize_sched() for more detailed information
  2822. * on memory ordering guarantees.
  2823. */
  2824. void synchronize_rcu_bh(void)
  2825. {
  2826. RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
  2827. lock_is_held(&rcu_lock_map) ||
  2828. lock_is_held(&rcu_sched_lock_map),
  2829. "Illegal synchronize_rcu_bh() in RCU-bh read-side critical section");
  2830. if (rcu_blocking_is_gp())
  2831. return;
  2832. if (rcu_gp_is_expedited())
  2833. synchronize_rcu_bh_expedited();
  2834. else
  2835. wait_rcu_gp(call_rcu_bh);
  2836. }
  2837. EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
  2838. /**
  2839. * get_state_synchronize_rcu - Snapshot current RCU state
  2840. *
  2841. * Returns a cookie that is used by a later call to cond_synchronize_rcu()
  2842. * to determine whether or not a full grace period has elapsed in the
  2843. * meantime.
  2844. */
  2845. unsigned long get_state_synchronize_rcu(void)
  2846. {
  2847. /*
  2848. * Any prior manipulation of RCU-protected data must happen
  2849. * before the load from ->gpnum.
  2850. */
  2851. smp_mb(); /* ^^^ */
  2852. /*
  2853. * Make sure this load happens before the purportedly
  2854. * time-consuming work between get_state_synchronize_rcu()
  2855. * and cond_synchronize_rcu().
  2856. */
  2857. return smp_load_acquire(&rcu_state_p->gpnum);
  2858. }
  2859. EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
  2860. /**
  2861. * cond_synchronize_rcu - Conditionally wait for an RCU grace period
  2862. *
  2863. * @oldstate: return value from earlier call to get_state_synchronize_rcu()
  2864. *
  2865. * If a full RCU grace period has elapsed since the earlier call to
  2866. * get_state_synchronize_rcu(), just return. Otherwise, invoke
  2867. * synchronize_rcu() to wait for a full grace period.
  2868. *
  2869. * Yes, this function does not take counter wrap into account. But
  2870. * counter wrap is harmless. If the counter wraps, we have waited for
  2871. * more than 2 billion grace periods (and way more on a 64-bit system!),
  2872. * so waiting for one additional grace period should be just fine.
  2873. */
  2874. void cond_synchronize_rcu(unsigned long oldstate)
  2875. {
  2876. unsigned long newstate;
  2877. /*
  2878. * Ensure that this load happens before any RCU-destructive
  2879. * actions the caller might carry out after we return.
  2880. */
  2881. newstate = smp_load_acquire(&rcu_state_p->completed);
  2882. if (ULONG_CMP_GE(oldstate, newstate))
  2883. synchronize_rcu();
  2884. }
  2885. EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
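/*
 * Illustrative sketch (editorial, not part of upstream tree.c): the
 * intended pairing of get_state_synchronize_rcu() with
 * cond_synchronize_rcu(). do_expensive_teardown() is a stand-in for
 * whatever work the caller does between the two calls; if a full grace
 * period elapses during that work, cond_synchronize_rcu() returns
 * without blocking.
 */
#if 0
static void teardown_object(void *obj)
{
	unsigned long gp_state;

	gp_state = get_state_synchronize_rcu();	/* Snapshot the current grace period. */
	do_expensive_teardown(obj);		/* Hopefully spans a grace period. */
	cond_synchronize_rcu(gp_state);		/* Block only if it did not. */
	kfree(obj);
}
#endif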
  2886. /**
  2887. * get_state_synchronize_sched - Snapshot current RCU-sched state
  2888. *
  2889. * Returns a cookie that is used by a later call to cond_synchronize_sched()
  2890. * to determine whether or not a full grace period has elapsed in the
  2891. * meantime.
  2892. */
  2893. unsigned long get_state_synchronize_sched(void)
  2894. {
  2895. /*
  2896. * Any prior manipulation of RCU-protected data must happen
  2897. * before the load from ->gpnum.
  2898. */
  2899. smp_mb(); /* ^^^ */
  2900. /*
  2901. * Make sure this load happens before the purportedly
  2902. * time-consuming work between get_state_synchronize_sched()
  2903. * and cond_synchronize_sched().
  2904. */
  2905. return smp_load_acquire(&rcu_sched_state.gpnum);
  2906. }
  2907. EXPORT_SYMBOL_GPL(get_state_synchronize_sched);
  2908. /**
  2909. * cond_synchronize_sched - Conditionally wait for an RCU-sched grace period
  2910. *
  2911. * @oldstate: return value from earlier call to get_state_synchronize_sched()
  2912. *
  2913. * If a full RCU-sched grace period has elapsed since the earlier call to
  2914. * get_state_synchronize_sched(), just return. Otherwise, invoke
  2915. * synchronize_sched() to wait for a full grace period.
  2916. *
  2917. * Yes, this function does not take counter wrap into account. But
  2918. * counter wrap is harmless. If the counter wraps, we have waited for
  2919. * more than 2 billion grace periods (and way more on a 64-bit system!),
  2920. * so waiting for one additional grace period should be just fine.
  2921. */
  2922. void cond_synchronize_sched(unsigned long oldstate)
  2923. {
  2924. unsigned long newstate;
  2925. /*
  2926. * Ensure that this load happens before any RCU-destructive
  2927. * actions the caller might carry out after we return.
  2928. */
  2929. newstate = smp_load_acquire(&rcu_sched_state.completed);
  2930. if (ULONG_CMP_GE(oldstate, newstate))
  2931. synchronize_sched();
  2932. }
  2933. EXPORT_SYMBOL_GPL(cond_synchronize_sched);
  2934. /*
  2935. * Check to see if there is any immediate RCU-related work to be done
  2936. * by the current CPU, for the specified type of RCU, returning 1 if so.
  2937. * The checks are in order of increasing expense: checks that can be
  2938. * carried out against CPU-local state are performed first. However,
  2939. * we must check for CPU stalls first, else we might not get a chance.
  2940. */
  2941. static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
  2942. {
  2943. struct rcu_node *rnp = rdp->mynode;
  2944. /* Check for CPU stalls, if enabled. */
  2945. check_cpu_stall(rsp, rdp);
  2946. /* Is this CPU a NO_HZ_FULL CPU that should ignore RCU? */
  2947. if (rcu_nohz_full_cpu(rsp))
  2948. return 0;
  2949. /* Is the RCU core waiting for a quiescent state from this CPU? */
  2950. if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm)
  2951. return 1;
  2952. /* Does this CPU have callbacks ready to invoke? */
  2953. if (rcu_segcblist_ready_cbs(&rdp->cblist))
  2954. return 1;
  2955. /* Has RCU gone idle with this CPU needing another grace period? */
  2956. if (!rcu_gp_in_progress(rsp) &&
  2957. rcu_segcblist_is_enabled(&rdp->cblist) &&
  2958. !rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
  2959. return 1;
  2960. /* Has another RCU grace period completed? */
  2961. if (READ_ONCE(rnp->completed) != rdp->completed) /* outside lock */
  2962. return 1;
  2963. /* Has a new RCU grace period started? */
  2964. if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
  2965. unlikely(READ_ONCE(rdp->gpwrap))) /* outside lock */
  2966. return 1;
  2967. /* Does this CPU need a deferred NOCB wakeup? */
  2968. if (rcu_nocb_need_deferred_wakeup(rdp))
  2969. return 1;
  2970. /* nothing to do */
  2971. return 0;
  2972. }
  2973. /*
  2974. * Check to see if there is any immediate RCU-related work to be done
  2975. * by the current CPU, returning 1 if so. This function is part of the
  2976. * RCU implementation; it is -not- an exported member of the RCU API.
  2977. */
  2978. static int rcu_pending(void)
  2979. {
  2980. struct rcu_state *rsp;
  2981. for_each_rcu_flavor(rsp)
  2982. if (__rcu_pending(rsp, this_cpu_ptr(rsp->rda)))
  2983. return 1;
  2984. return 0;
  2985. }
  2986. /*
  2987. * Return true if the specified CPU has any callback. If all_lazy is
  2988. * non-NULL, store an indication of whether all callbacks are lazy.
  2989. * (If there are no callbacks, all of them are deemed to be lazy.)
  2990. */
  2991. static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
  2992. {
  2993. bool al = true;
  2994. bool hc = false;
  2995. struct rcu_data *rdp;
  2996. struct rcu_state *rsp;
  2997. for_each_rcu_flavor(rsp) {
  2998. rdp = this_cpu_ptr(rsp->rda);
  2999. if (rcu_segcblist_empty(&rdp->cblist))
  3000. continue;
  3001. hc = true;
  3002. if (rcu_segcblist_n_nonlazy_cbs(&rdp->cblist) || !all_lazy) {
  3003. al = false;
  3004. break;
  3005. }
  3006. }
  3007. if (all_lazy)
  3008. *all_lazy = al;
  3009. return hc;
  3010. }
  3011. /*
  3012. * Helper function for _rcu_barrier() tracing. If tracing is disabled,
  3013. * the compiler is expected to optimize this away.
  3014. */
  3015. static void _rcu_barrier_trace(struct rcu_state *rsp, const char *s,
  3016. int cpu, unsigned long done)
  3017. {
  3018. trace_rcu_barrier(rsp->name, s, cpu,
  3019. atomic_read(&rsp->barrier_cpu_count), done);
  3020. }
  3021. /*
  3022. * RCU callback function for _rcu_barrier(). If we are last, wake
  3023. * up the task executing _rcu_barrier().
  3024. */
  3025. static void rcu_barrier_callback(struct rcu_head *rhp)
  3026. {
  3027. struct rcu_data *rdp = container_of(rhp, struct rcu_data, barrier_head);
  3028. struct rcu_state *rsp = rdp->rsp;
  3029. if (atomic_dec_and_test(&rsp->barrier_cpu_count)) {
  3030. _rcu_barrier_trace(rsp, TPS("LastCB"), -1,
  3031. rsp->barrier_sequence);
  3032. complete(&rsp->barrier_completion);
  3033. } else {
  3034. _rcu_barrier_trace(rsp, TPS("CB"), -1, rsp->barrier_sequence);
  3035. }
  3036. }
  3037. /*
  3038. * Called with preemption disabled, and from cross-cpu IRQ context.
  3039. */
  3040. static void rcu_barrier_func(void *type)
  3041. {
  3042. struct rcu_state *rsp = type;
  3043. struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
  3044. _rcu_barrier_trace(rsp, TPS("IRQ"), -1, rsp->barrier_sequence);
  3045. rdp->barrier_head.func = rcu_barrier_callback;
  3046. debug_rcu_head_queue(&rdp->barrier_head);
  3047. if (rcu_segcblist_entrain(&rdp->cblist, &rdp->barrier_head, 0)) {
  3048. atomic_inc(&rsp->barrier_cpu_count);
  3049. } else {
  3050. debug_rcu_head_unqueue(&rdp->barrier_head);
  3051. _rcu_barrier_trace(rsp, TPS("IRQNQ"), -1,
  3052. rsp->barrier_sequence);
  3053. }
  3054. }
  3055. /*
  3056. * Orchestrate the specified type of RCU barrier, waiting for all
  3057. * RCU callbacks of the specified type to complete.
  3058. */
  3059. static void _rcu_barrier(struct rcu_state *rsp)
  3060. {
  3061. int cpu;
  3062. struct rcu_data *rdp;
  3063. unsigned long s = rcu_seq_snap(&rsp->barrier_sequence);
  3064. _rcu_barrier_trace(rsp, TPS("Begin"), -1, s);
  3065. /* Take mutex to serialize concurrent rcu_barrier() requests. */
  3066. mutex_lock(&rsp->barrier_mutex);
  3067. /* Did someone else do our work for us? */
  3068. if (rcu_seq_done(&rsp->barrier_sequence, s)) {
  3069. _rcu_barrier_trace(rsp, TPS("EarlyExit"), -1,
  3070. rsp->barrier_sequence);
  3071. smp_mb(); /* caller's subsequent code after above check. */
  3072. mutex_unlock(&rsp->barrier_mutex);
  3073. return;
  3074. }
  3075. /* Mark the start of the barrier operation. */
  3076. rcu_seq_start(&rsp->barrier_sequence);
  3077. _rcu_barrier_trace(rsp, TPS("Inc1"), -1, rsp->barrier_sequence);
  3078. /*
  3079. * Initialize the count to one rather than to zero in order to
  3080. * avoid a too-soon return to zero in case of a short grace period
  3081. * (or preemption of this task). Exclude CPU-hotplug operations
  3082. * to ensure that no offline CPU has callbacks queued.
  3083. */
  3084. init_completion(&rsp->barrier_completion);
  3085. atomic_set(&rsp->barrier_cpu_count, 1);
  3086. get_online_cpus();
  3087. /*
  3088. * Force each CPU with callbacks to register a new callback.
  3089. * When that callback is invoked, we will know that all of the
  3090. * corresponding CPU's preceding callbacks have been invoked.
  3091. */
  3092. for_each_possible_cpu(cpu) {
  3093. if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
  3094. continue;
  3095. rdp = per_cpu_ptr(rsp->rda, cpu);
  3096. if (rcu_is_nocb_cpu(cpu)) {
  3097. if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
  3098. _rcu_barrier_trace(rsp, TPS("OfflineNoCB"), cpu,
  3099. rsp->barrier_sequence);
  3100. } else {
  3101. _rcu_barrier_trace(rsp, TPS("OnlineNoCB"), cpu,
  3102. rsp->barrier_sequence);
  3103. smp_mb__before_atomic();
  3104. atomic_inc(&rsp->barrier_cpu_count);
  3105. __call_rcu(&rdp->barrier_head,
  3106. rcu_barrier_callback, rsp, cpu, 0);
  3107. }
  3108. } else if (rcu_segcblist_n_cbs(&rdp->cblist)) {
  3109. _rcu_barrier_trace(rsp, TPS("OnlineQ"), cpu,
  3110. rsp->barrier_sequence);
  3111. smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
  3112. } else {
  3113. _rcu_barrier_trace(rsp, TPS("OnlineNQ"), cpu,
  3114. rsp->barrier_sequence);
  3115. }
  3116. }
  3117. put_online_cpus();
  3118. /*
  3119. * Now that we have an rcu_barrier_callback() callback on each
  3120. * CPU, and thus each counted, remove the initial count.
  3121. */
  3122. if (atomic_dec_and_test(&rsp->barrier_cpu_count))
  3123. complete(&rsp->barrier_completion);
  3124. /* Wait for all rcu_barrier_callback() callbacks to be invoked. */
  3125. wait_for_completion(&rsp->barrier_completion);
  3126. /* Mark the end of the barrier operation. */
  3127. _rcu_barrier_trace(rsp, TPS("Inc2"), -1, rsp->barrier_sequence);
  3128. rcu_seq_end(&rsp->barrier_sequence);
  3129. /* Other rcu_barrier() invocations can now safely proceed. */
  3130. mutex_unlock(&rsp->barrier_mutex);
  3131. }
  3132. /**
  3133. * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
  3134. */
  3135. void rcu_barrier_bh(void)
  3136. {
  3137. _rcu_barrier(&rcu_bh_state);
  3138. }
  3139. EXPORT_SYMBOL_GPL(rcu_barrier_bh);
  3140. /**
  3141. * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
  3142. */
  3143. void rcu_barrier_sched(void)
  3144. {
  3145. _rcu_barrier(&rcu_sched_state);
  3146. }
  3147. EXPORT_SYMBOL_GPL(rcu_barrier_sched);
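/*
 * Illustrative sketch (editorial, not part of upstream tree.c): the
 * classic module-exit use of the barrier primitives. Once no new
 * callbacks can be posted, rcu_barrier_sched() guarantees that every
 * callback already queued by call_rcu_sched() has been invoked, so the
 * module text and data those callbacks touch may safely go away.
 * stop_posting_callbacks() is a stand-in for the module's own
 * quiescing logic.
 */
#if 0
static void __exit example_exit(void)
{
	stop_posting_callbacks();	/* No further call_rcu_sched() invocations. */
	rcu_barrier_sched();		/* Wait for already-queued callbacks to run. */
	/* Now it is safe to free the data structures those callbacks reference. */
}
module_exit(example_exit);
#endif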
  3148. /*
3149. * Propagate ->qsmaskinit bits up the rcu_node tree to account for the
  3150. * first CPU in a given leaf rcu_node structure coming online. The caller
3151. * must hold the corresponding leaf rcu_node ->lock with interrupts
  3152. * disabled.
  3153. */
  3154. static void rcu_init_new_rnp(struct rcu_node *rnp_leaf)
  3155. {
  3156. long mask;
  3157. struct rcu_node *rnp = rnp_leaf;
  3158. raw_lockdep_assert_held_rcu_node(rnp);
  3159. for (;;) {
  3160. mask = rnp->grpmask;
  3161. rnp = rnp->parent;
  3162. if (rnp == NULL)
  3163. return;
  3164. raw_spin_lock_rcu_node(rnp); /* Interrupts already disabled. */
  3165. rnp->qsmaskinit |= mask;
  3166. raw_spin_unlock_rcu_node(rnp); /* Interrupts remain disabled. */
  3167. }
  3168. }
  3169. /*
  3170. * Do boot-time initialization of a CPU's per-CPU RCU data.
  3171. */
  3172. static void __init
  3173. rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
  3174. {
  3175. struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  3176. /* Set up local state, ensuring consistent view of global state. */
  3177. rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
  3178. rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
  3179. WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != 1);
  3180. WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
  3181. rdp->cpu = cpu;
  3182. rdp->rsp = rsp;
  3183. rcu_boot_init_nocb_percpu_data(rdp);
  3184. }
  3185. /*
  3186. * Initialize a CPU's per-CPU RCU data. Note that only one online or
  3187. * offline event can be happening at a given time. Note also that we
  3188. * can accept some slop in the rsp->completed access due to the fact
  3189. * that this CPU cannot possibly have any RCU callbacks in flight yet.
  3190. */
  3191. static void
  3192. rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
  3193. {
  3194. unsigned long flags;
  3195. struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  3196. struct rcu_node *rnp = rcu_get_root(rsp);
  3197. /* Set up local state, ensuring consistent view of global state. */
  3198. raw_spin_lock_irqsave_rcu_node(rnp, flags);
  3199. rdp->qlen_last_fqs_check = 0;
  3200. rdp->n_force_qs_snap = rsp->n_force_qs;
  3201. rdp->blimit = blimit;
  3202. if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
  3203. !init_nocb_callback_list(rdp))
  3204. rcu_segcblist_init(&rdp->cblist); /* Re-enable callbacks. */
  3205. rdp->dynticks->dynticks_nesting = 1; /* CPU not up, no tearing. */
  3206. rcu_dynticks_eqs_online();
  3207. raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
  3208. /*
  3209. * Add CPU to leaf rcu_node pending-online bitmask. Any needed
  3210. * propagation up the rcu_node tree will happen at the beginning
  3211. * of the next grace period.
  3212. */
  3213. rnp = rdp->mynode;
  3214. raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
  3215. rdp->beenonline = true; /* We have now been online. */
  3216. rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
  3217. rdp->completed = rnp->completed;
  3218. rdp->cpu_no_qs.b.norm = true;
  3219. rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
  3220. rdp->core_needs_qs = false;
  3221. rdp->rcu_iw_pending = false;
  3222. rdp->rcu_iw_gpnum = rnp->gpnum - 1;
  3223. trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
  3224. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  3225. }
  3226. /*
  3227. * Invoked early in the CPU-online process, when pretty much all
  3228. * services are available. The incoming CPU is not present.
  3229. */
  3230. int rcutree_prepare_cpu(unsigned int cpu)
  3231. {
  3232. struct rcu_state *rsp;
  3233. for_each_rcu_flavor(rsp)
  3234. rcu_init_percpu_data(cpu, rsp);
  3235. rcu_prepare_kthreads(cpu);
  3236. rcu_spawn_all_nocb_kthreads(cpu);
  3237. return 0;
  3238. }
  3239. /*
  3240. * Update RCU priority boot kthread affinity for CPU-hotplug changes.
  3241. */
  3242. static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
  3243. {
  3244. struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
  3245. rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
  3246. }
  3247. /*
  3248. * Near the end of the CPU-online process. Pretty much all services
  3249. * enabled, and the CPU is now very much alive.
  3250. */
  3251. int rcutree_online_cpu(unsigned int cpu)
  3252. {
  3253. unsigned long flags;
  3254. struct rcu_data *rdp;
  3255. struct rcu_node *rnp;
  3256. struct rcu_state *rsp;
  3257. for_each_rcu_flavor(rsp) {
  3258. rdp = per_cpu_ptr(rsp->rda, cpu);
  3259. rnp = rdp->mynode;
  3260. raw_spin_lock_irqsave_rcu_node(rnp, flags);
  3261. rnp->ffmask |= rdp->grpmask;
  3262. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  3263. }
  3264. if (IS_ENABLED(CONFIG_TREE_SRCU))
  3265. srcu_online_cpu(cpu);
  3266. if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
  3267. return 0; /* Too early in boot for scheduler work. */
  3268. sync_sched_exp_online_cleanup(cpu);
  3269. rcutree_affinity_setting(cpu, -1);
  3270. return 0;
  3271. }
  3272. /*
3273. * Near the beginning of the CPU-offline process. The CPU is still very much alive
  3274. * with pretty much all services enabled.
  3275. */
  3276. int rcutree_offline_cpu(unsigned int cpu)
  3277. {
  3278. unsigned long flags;
  3279. struct rcu_data *rdp;
  3280. struct rcu_node *rnp;
  3281. struct rcu_state *rsp;
  3282. for_each_rcu_flavor(rsp) {
  3283. rdp = per_cpu_ptr(rsp->rda, cpu);
  3284. rnp = rdp->mynode;
  3285. raw_spin_lock_irqsave_rcu_node(rnp, flags);
  3286. rnp->ffmask &= ~rdp->grpmask;
  3287. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  3288. }
  3289. rcutree_affinity_setting(cpu, cpu);
  3290. if (IS_ENABLED(CONFIG_TREE_SRCU))
  3291. srcu_offline_cpu(cpu);
  3292. return 0;
  3293. }
  3294. /*
  3295. * Near the end of the offline process. We do only tracing here.
  3296. */
  3297. int rcutree_dying_cpu(unsigned int cpu)
  3298. {
  3299. struct rcu_state *rsp;
  3300. for_each_rcu_flavor(rsp)
  3301. rcu_cleanup_dying_cpu(rsp);
  3302. return 0;
  3303. }
  3304. /*
  3305. * The outgoing CPU is gone and we are running elsewhere.
  3306. */
  3307. int rcutree_dead_cpu(unsigned int cpu)
  3308. {
  3309. struct rcu_state *rsp;
  3310. for_each_rcu_flavor(rsp) {
  3311. rcu_cleanup_dead_cpu(cpu, rsp);
  3312. do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
  3313. }
  3314. return 0;
  3315. }
  3316. static DEFINE_PER_CPU(int, rcu_cpu_started);
  3317. /*
  3318. * Mark the specified CPU as being online so that subsequent grace periods
  3319. * (both expedited and normal) will wait on it. Note that this means that
  3320. * incoming CPUs are not allowed to use RCU read-side critical sections
  3321. * until this function is called. Failing to observe this restriction
  3322. * will result in lockdep splats.
  3323. *
  3324. * Note that this function is special in that it is invoked directly
  3325. * from the incoming CPU rather than from the cpuhp_step mechanism.
  3326. * This is because this function must be invoked at a precise location.
  3327. */
  3328. void rcu_cpu_starting(unsigned int cpu)
  3329. {
  3330. unsigned long flags;
  3331. unsigned long mask;
  3332. int nbits;
  3333. unsigned long oldmask;
  3334. struct rcu_data *rdp;
  3335. struct rcu_node *rnp;
  3336. struct rcu_state *rsp;
  3337. if (per_cpu(rcu_cpu_started, cpu))
  3338. return;
  3339. per_cpu(rcu_cpu_started, cpu) = 1;
  3340. for_each_rcu_flavor(rsp) {
  3341. rdp = per_cpu_ptr(rsp->rda, cpu);
  3342. rnp = rdp->mynode;
  3343. mask = rdp->grpmask;
  3344. raw_spin_lock_irqsave_rcu_node(rnp, flags);
  3345. rnp->qsmaskinitnext |= mask;
  3346. oldmask = rnp->expmaskinitnext;
  3347. rnp->expmaskinitnext |= mask;
  3348. oldmask ^= rnp->expmaskinitnext;
  3349. nbits = bitmap_weight(&oldmask, BITS_PER_LONG);
  3350. /* Allow lockless access for expedited grace periods. */
  3351. smp_store_release(&rsp->ncpus, rsp->ncpus + nbits); /* ^^^ */
  3352. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  3353. }
  3354. smp_mb(); /* Ensure RCU read-side usage follows above initialization. */
  3355. }
  3356. #ifdef CONFIG_HOTPLUG_CPU
  3357. /*
  3358. * The CPU is exiting the idle loop into the arch_cpu_idle_dead()
  3359. * function. We now remove it from the rcu_node tree's ->qsmaskinit
  3360. * bit masks.
  3361. */
  3362. static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
  3363. {
  3364. unsigned long flags;
  3365. unsigned long mask;
  3366. struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  3367. struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */
  3368. /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
  3369. mask = rdp->grpmask;
  3370. raw_spin_lock_irqsave_rcu_node(rnp, flags); /* Enforce GP memory-order guarantee. */
  3371. rnp->qsmaskinitnext &= ~mask;
  3372. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  3373. }
  3374. /*
3375. * The outgoing CPU has no further need of RCU, so remove it from
  3376. * the list of CPUs that RCU must track.
  3377. *
  3378. * Note that this function is special in that it is invoked directly
  3379. * from the outgoing CPU rather than from the cpuhp_step mechanism.
  3380. * This is because this function must be invoked at a precise location.
  3381. */
  3382. void rcu_report_dead(unsigned int cpu)
  3383. {
  3384. struct rcu_state *rsp;
  3385. /* QS for any half-done expedited RCU-sched GP. */
  3386. preempt_disable();
  3387. rcu_report_exp_rdp(&rcu_sched_state,
  3388. this_cpu_ptr(rcu_sched_state.rda), true);
  3389. preempt_enable();
  3390. for_each_rcu_flavor(rsp)
  3391. rcu_cleanup_dying_idle_cpu(cpu, rsp);
  3392. per_cpu(rcu_cpu_started, cpu) = 0;
  3393. }
  3394. /* Migrate the dead CPU's callbacks to the current CPU. */
  3395. static void rcu_migrate_callbacks(int cpu, struct rcu_state *rsp)
  3396. {
  3397. unsigned long flags;
  3398. struct rcu_data *my_rdp;
  3399. struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
  3400. struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
  3401. bool needwake;
  3402. if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
  3403. return; /* No callbacks to migrate. */
  3404. local_irq_save(flags);
  3405. my_rdp = this_cpu_ptr(rsp->rda);
  3406. if (rcu_nocb_adopt_orphan_cbs(my_rdp, rdp, flags)) {
  3407. local_irq_restore(flags);
  3408. return;
  3409. }
  3410. raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
  3411. /* Leverage recent GPs and set GP for new callbacks. */
  3412. needwake = rcu_advance_cbs(rsp, rnp_root, rdp) ||
  3413. rcu_advance_cbs(rsp, rnp_root, my_rdp);
  3414. rcu_segcblist_merge(&my_rdp->cblist, &rdp->cblist);
  3415. WARN_ON_ONCE(rcu_segcblist_empty(&my_rdp->cblist) !=
  3416. !rcu_segcblist_n_cbs(&my_rdp->cblist));
  3417. raw_spin_unlock_irqrestore_rcu_node(rnp_root, flags);
  3418. if (needwake)
  3419. rcu_gp_kthread_wake(rsp);
  3420. WARN_ONCE(rcu_segcblist_n_cbs(&rdp->cblist) != 0 ||
  3421. !rcu_segcblist_empty(&rdp->cblist),
  3422. "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, 1stCB=%p\n",
  3423. cpu, rcu_segcblist_n_cbs(&rdp->cblist),
  3424. rcu_segcblist_first_cb(&rdp->cblist));
  3425. }
  3426. /*
  3427. * The outgoing CPU has just passed through the dying-idle state,
  3428. * and we are being invoked from the CPU that was IPIed to continue the
  3429. * offline operation. We need to migrate the outgoing CPU's callbacks.
  3430. */
  3431. void rcutree_migrate_callbacks(int cpu)
  3432. {
  3433. struct rcu_state *rsp;
  3434. for_each_rcu_flavor(rsp)
  3435. rcu_migrate_callbacks(cpu, rsp);
  3436. }
  3437. #endif
  3438. /*
  3439. * On non-huge systems, use expedited RCU grace periods to make suspend
  3440. * and hibernation run faster.
  3441. */
  3442. static int rcu_pm_notify(struct notifier_block *self,
  3443. unsigned long action, void *hcpu)
  3444. {
  3445. switch (action) {
  3446. case PM_HIBERNATION_PREPARE:
  3447. case PM_SUSPEND_PREPARE:
  3448. if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
  3449. rcu_expedite_gp();
  3450. break;
  3451. case PM_POST_HIBERNATION:
  3452. case PM_POST_SUSPEND:
  3453. if (nr_cpu_ids <= 256) /* Expediting bad for large systems. */
  3454. rcu_unexpedite_gp();
  3455. break;
  3456. default:
  3457. break;
  3458. }
  3459. return NOTIFY_OK;
  3460. }
  3461. /*
  3462. * Spawn the kthreads that handle each RCU flavor's grace periods.
  3463. */
  3464. static int __init rcu_spawn_gp_kthread(void)
  3465. {
  3466. unsigned long flags;
  3467. int kthread_prio_in = kthread_prio;
  3468. struct rcu_node *rnp;
  3469. struct rcu_state *rsp;
  3470. struct sched_param sp;
  3471. struct task_struct *t;
  3472. /* Force priority into range. */
  3473. if (IS_ENABLED(CONFIG_RCU_BOOST) && kthread_prio < 1)
  3474. kthread_prio = 1;
  3475. else if (kthread_prio < 0)
  3476. kthread_prio = 0;
  3477. else if (kthread_prio > 99)
  3478. kthread_prio = 99;
  3479. if (kthread_prio != kthread_prio_in)
  3480. pr_alert("rcu_spawn_gp_kthread(): Limited prio to %d from %d\n",
  3481. kthread_prio, kthread_prio_in);
  3482. rcu_scheduler_fully_active = 1;
  3483. for_each_rcu_flavor(rsp) {
  3484. t = kthread_create(rcu_gp_kthread, rsp, "%s", rsp->name);
  3485. BUG_ON(IS_ERR(t));
  3486. rnp = rcu_get_root(rsp);
  3487. raw_spin_lock_irqsave_rcu_node(rnp, flags);
  3488. rsp->gp_kthread = t;
  3489. if (kthread_prio) {
  3490. sp.sched_priority = kthread_prio;
  3491. sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
  3492. }
  3493. raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
  3494. wake_up_process(t);
  3495. }
  3496. rcu_spawn_nocb_kthreads();
  3497. rcu_spawn_boost_kthreads();
  3498. return 0;
  3499. }
  3500. early_initcall(rcu_spawn_gp_kthread);
  3501. /*
  3502. * This function is invoked towards the end of the scheduler's
  3503. * initialization process. Before this is called, the idle task might
  3504. * contain synchronous grace-period primitives (during which time, this idle
  3505. * task is booting the system, and such primitives are no-ops). After this
  3506. * function is called, any synchronous grace-period primitives are run as
  3507. * expedited, with the requesting task driving the grace period forward.
  3508. * A later core_initcall() rcu_set_runtime_mode() will switch to full
  3509. * runtime RCU functionality.
  3510. */
  3511. void rcu_scheduler_starting(void)
  3512. {
  3513. WARN_ON(num_online_cpus() != 1);
  3514. WARN_ON(nr_context_switches() > 0);
  3515. rcu_test_sync_prims();
  3516. rcu_scheduler_active = RCU_SCHEDULER_INIT;
  3517. rcu_test_sync_prims();
  3518. }
  3519. /*
  3520. * Helper function for rcu_init() that initializes one rcu_state structure.
  3521. */
  3522. static void __init rcu_init_one(struct rcu_state *rsp)
  3523. {
  3524. static const char * const buf[] = RCU_NODE_NAME_INIT;
  3525. static const char * const fqs[] = RCU_FQS_NAME_INIT;
  3526. static struct lock_class_key rcu_node_class[RCU_NUM_LVLS];
  3527. static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
  3528. int levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
  3529. int cpustride = 1;
  3530. int i;
  3531. int j;
  3532. struct rcu_node *rnp;
  3533. BUILD_BUG_ON(RCU_NUM_LVLS > ARRAY_SIZE(buf)); /* Fix buf[] init! */
  3534. /* Silence gcc 4.8 false positive about array index out of range. */
  3535. if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
  3536. panic("rcu_init_one: rcu_num_lvls out of range");
  3537. /* Initialize the level-tracking arrays. */
  3538. for (i = 1; i < rcu_num_lvls; i++)
  3539. rsp->level[i] = rsp->level[i - 1] + num_rcu_lvl[i - 1];
  3540. rcu_init_levelspread(levelspread, num_rcu_lvl);
  3541. /* Initialize the elements themselves, starting from the leaves. */
  3542. for (i = rcu_num_lvls - 1; i >= 0; i--) {
  3543. cpustride *= levelspread[i];
  3544. rnp = rsp->level[i];
  3545. for (j = 0; j < num_rcu_lvl[i]; j++, rnp++) {
  3546. raw_spin_lock_init(&ACCESS_PRIVATE(rnp, lock));
  3547. lockdep_set_class_and_name(&ACCESS_PRIVATE(rnp, lock),
  3548. &rcu_node_class[i], buf[i]);
  3549. raw_spin_lock_init(&rnp->fqslock);
  3550. lockdep_set_class_and_name(&rnp->fqslock,
  3551. &rcu_fqs_class[i], fqs[i]);
  3552. rnp->gpnum = rsp->gpnum;
  3553. rnp->completed = rsp->completed;
  3554. rnp->qsmask = 0;
  3555. rnp->qsmaskinit = 0;
  3556. rnp->grplo = j * cpustride;
  3557. rnp->grphi = (j + 1) * cpustride - 1;
  3558. if (rnp->grphi >= nr_cpu_ids)
  3559. rnp->grphi = nr_cpu_ids - 1;
  3560. if (i == 0) {
  3561. rnp->grpnum = 0;
  3562. rnp->grpmask = 0;
  3563. rnp->parent = NULL;
  3564. } else {
  3565. rnp->grpnum = j % levelspread[i - 1];
  3566. rnp->grpmask = 1UL << rnp->grpnum;
  3567. rnp->parent = rsp->level[i - 1] +
  3568. j / levelspread[i - 1];
  3569. }
  3570. rnp->level = i;
  3571. INIT_LIST_HEAD(&rnp->blkd_tasks);
  3572. rcu_init_one_nocb(rnp);
  3573. init_waitqueue_head(&rnp->exp_wq[0]);
  3574. init_waitqueue_head(&rnp->exp_wq[1]);
  3575. init_waitqueue_head(&rnp->exp_wq[2]);
  3576. init_waitqueue_head(&rnp->exp_wq[3]);
  3577. spin_lock_init(&rnp->exp_lock);
  3578. }
  3579. }
  3580. init_swait_queue_head(&rsp->gp_wq);
  3581. init_swait_queue_head(&rsp->expedited_wq);
  3582. rnp = rcu_first_leaf_node(rsp);
  3583. for_each_possible_cpu(i) {
  3584. while (i > rnp->grphi)
  3585. rnp++;
  3586. per_cpu_ptr(rsp->rda, i)->mynode = rnp;
  3587. rcu_boot_init_percpu_data(i, rsp);
  3588. }
  3589. list_add(&rsp->flavors, &rcu_struct_flavors);
  3590. }
  3591. /*
  3592. * Compute the rcu_node tree geometry from kernel parameters. This cannot
  3593. * replace the definitions in tree.h because those are needed to size
  3594. * the ->node array in the rcu_state structure.
  3595. */
  3596. static void __init rcu_init_geometry(void)
  3597. {
  3598. ulong d;
  3599. int i;
  3600. int rcu_capacity[RCU_NUM_LVLS];
  3601. /*
  3602. * Initialize any unspecified boot parameters.
  3603. * The default values of jiffies_till_first_fqs and
  3604. * jiffies_till_next_fqs are set to the RCU_JIFFIES_TILL_FORCE_QS
  3605. * value, which is a function of HZ, then adding one for each
  3606. * RCU_JIFFIES_FQS_DIV CPUs that might be on the system.
  3607. */
  3608. d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV;
  3609. if (jiffies_till_first_fqs == ULONG_MAX)
  3610. jiffies_till_first_fqs = d;
  3611. if (jiffies_till_next_fqs == ULONG_MAX)
  3612. jiffies_till_next_fqs = d;
  3613. /* If the compile-time values are accurate, just leave. */
  3614. if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
  3615. nr_cpu_ids == NR_CPUS)
  3616. return;
  3617. pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%u\n",
  3618. rcu_fanout_leaf, nr_cpu_ids);
  3619. /*
  3620. * The boot-time rcu_fanout_leaf parameter must be at least two
  3621. * and cannot exceed the number of bits in the rcu_node masks.
  3622. * Complain and fall back to the compile-time values if this
  3623. * limit is exceeded.
  3624. */
  3625. if (rcu_fanout_leaf < 2 ||
  3626. rcu_fanout_leaf > sizeof(unsigned long) * 8) {
  3627. rcu_fanout_leaf = RCU_FANOUT_LEAF;
  3628. WARN_ON(1);
  3629. return;
  3630. }
  3631. /*
3632. * Compute number of nodes that can be handled by an rcu_node tree
  3633. * with the given number of levels.
  3634. */
  3635. rcu_capacity[0] = rcu_fanout_leaf;
  3636. for (i = 1; i < RCU_NUM_LVLS; i++)
  3637. rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
  3638. /*
  3639. * The tree must be able to accommodate the configured number of CPUs.
  3640. * If this limit is exceeded, fall back to the compile-time values.
  3641. */
  3642. if (nr_cpu_ids > rcu_capacity[RCU_NUM_LVLS - 1]) {
  3643. rcu_fanout_leaf = RCU_FANOUT_LEAF;
  3644. WARN_ON(1);
  3645. return;
  3646. }
  3647. /* Calculate the number of levels in the tree. */
  3648. for (i = 0; nr_cpu_ids > rcu_capacity[i]; i++) {
  3649. }
  3650. rcu_num_lvls = i + 1;
  3651. /* Calculate the number of rcu_nodes at each level of the tree. */
  3652. for (i = 0; i < rcu_num_lvls; i++) {
  3653. int cap = rcu_capacity[(rcu_num_lvls - 1) - i];
  3654. num_rcu_lvl[i] = DIV_ROUND_UP(nr_cpu_ids, cap);
  3655. }
  3656. /* Calculate the total number of rcu_node structures. */
  3657. rcu_num_nodes = 0;
  3658. for (i = 0; i < rcu_num_lvls; i++)
  3659. rcu_num_nodes += num_rcu_lvl[i];
  3660. }
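/*
 * Editorial worked example (assuming the common 64-bit defaults of
 * RCU_FANOUT_LEAF = 16 and RCU_FANOUT = 64): for nr_cpu_ids = 1000,
 * rcu_capacity[] starts {16, 1024, ...}, so the level-counting loop
 * yields rcu_num_lvls = 2. Then num_rcu_lvl[0] = DIV_ROUND_UP(1000,
 * 1024) = 1 root node and num_rcu_lvl[1] = DIV_ROUND_UP(1000, 16) = 63
 * leaf nodes, giving rcu_num_nodes = 64 rcu_node structures in total.
 */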
  3661. /*
  3662. * Dump out the structure of the rcu_node combining tree associated
  3663. * with the rcu_state structure referenced by rsp.
  3664. */
  3665. static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
  3666. {
  3667. int level = 0;
  3668. struct rcu_node *rnp;
  3669. pr_info("rcu_node tree layout dump\n");
  3670. pr_info(" ");
  3671. rcu_for_each_node_breadth_first(rsp, rnp) {
  3672. if (rnp->level != level) {
  3673. pr_cont("\n");
  3674. pr_info(" ");
  3675. level = rnp->level;
  3676. }
  3677. pr_cont("%d:%d ^%d ", rnp->grplo, rnp->grphi, rnp->grpnum);
  3678. }
  3679. pr_cont("\n");
  3680. }
  3681. struct workqueue_struct *rcu_gp_wq;
  3682. struct workqueue_struct *rcu_par_gp_wq;
  3683. void __init rcu_init(void)
  3684. {
  3685. int cpu;
  3686. rcu_early_boot_tests();
  3687. rcu_bootup_announce();
  3688. rcu_init_geometry();
  3689. rcu_init_one(&rcu_bh_state);
  3690. rcu_init_one(&rcu_sched_state);
  3691. if (dump_tree)
  3692. rcu_dump_rcu_node_tree(&rcu_sched_state);
  3693. __rcu_init_preempt();
  3694. open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
  3695. /*
  3696. * We don't need protection against CPU-hotplug here because
  3697. * this is called early in boot, before either interrupts
  3698. * or the scheduler are operational.
  3699. */
  3700. pm_notifier(rcu_pm_notify, 0);
  3701. for_each_online_cpu(cpu) {
  3702. rcutree_prepare_cpu(cpu);
  3703. rcu_cpu_starting(cpu);
  3704. rcutree_online_cpu(cpu);
  3705. }
  3706. /* Create workqueue for expedited GPs and for Tree SRCU. */
  3707. rcu_gp_wq = alloc_workqueue("rcu_gp", WQ_MEM_RECLAIM, 0);
  3708. WARN_ON(!rcu_gp_wq);
  3709. rcu_par_gp_wq = alloc_workqueue("rcu_par_gp", WQ_MEM_RECLAIM, 0);
  3710. WARN_ON(!rcu_par_gp_wq);
  3711. }
  3712. #include "tree_exp.h"
  3713. #include "tree_plugin.h"