sched_fair.c

/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/latencytop.h>
#include <linux/sched.h>
#include <linux/cpumask.h>

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 * run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 6000000ULL;
unsigned int normalized_sysctl_sched_latency = 6000000ULL;
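/*
 * Illustrative numbers for the LOG scaling above (assuming ilog() here
 * means ilog2()): on an 8-cpu machine the boot-time factor is
 * 1 + ilog2(8) = 4, so the effective latency target becomes
 * 6ms * 4 = 24ms, while the normalized_ variant keeps the unscaled
 * 6ms baseline.
 */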
/*
 * The initial- and re-scaling of tunables is configurable
 * (default SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
 *
 * Options are:
 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *1+ilog(ncpus)
 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
 */
enum sched_tunable_scaling sysctl_sched_tunable_scaling
	= SCHED_TUNABLESCALING_LOG;

/*
 * sched_nr_latency is kept at
 * sysctl_sched_latency / sysctl_sched_min_granularity
 */
static unsigned int sched_nr_latency = 8;
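/*
 * With the defaults above this works out to 6000000 / 750000 = 8: up to
 * eight runnable tasks fit in one latency period before __sched_period()
 * below has to stretch it.
 */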
/*
 * After fork, child runs first. If set to 0 (default) then
 * parent will (try to) run first.
 */
unsigned int sysctl_sched_child_runs_first __read_mostly;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/*
 * The exponential sliding window over which load is averaged for shares
 * distribution.
 * (default: 10msec)
 */
unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;

static const struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

static inline struct task_struct *task_of(struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!entity_is_task(se));
#endif
	return container_of(se, struct task_struct, se);
}

/* Walk up the scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (!cfs_rq->on_list) {
		/*
		 * Ensure we either appear before our parent (if already
		 * enqueued) or force our parent to appear after us when it is
		 * enqueued. The fact that we always enqueue bottom-up
		 * reduces this to two cases.
		 */
		if (cfs_rq->tg->parent &&
		    cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
			list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		} else {
			list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
				&rq_of(cfs_rq)->leaf_cfs_rq_list);
		}

		cfs_rq->on_list = 1;
	}
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->on_list) {
		list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
		cfs_rq->on_list = 0;
	}
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

/* return depth at which a sched entity is present in the hierarchy */
static inline int depth_se(struct sched_entity *se)
{
	int depth = 0;

	for_each_sched_entity(se)
		depth++;

	return depth;
}

static void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
	int se_depth, pse_depth;

	/*
	 * The preemption test can only be made between sibling entities
	 * that are in the same cfs_rq, i.e. that have a common parent.
	 * Walk up the hierarchy of both tasks until we find ancestors
	 * that are siblings under a common parent.
	 */

	/* First walk up until both entities are at the same depth */
	se_depth = depth_se(*se);
	pse_depth = depth_se(*pse);

	while (se_depth > pse_depth) {
		se_depth--;
		*se = parent_entity(*se);
	}

	while (pse_depth > se_depth) {
		pse_depth--;
		*pse = parent_entity(*pse);
	}

	while (!is_same_group(*se, *pse)) {
		*se = parent_entity(*se);
		*pse = parent_entity(*pse);
	}
}
#else	/* !CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
{
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

static inline void
find_matching_se(struct sched_entity **se, struct sched_entity **pse)
{
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
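/*
 * The signed subtraction in the two helpers above makes them safe
 * against u64 wraparound. For example, with vruntime = 5 and
 * min_vruntime = ULLONG_MAX - 2, (s64)(5 - (ULLONG_MAX - 2)) = 8 > 0,
 * so vruntime is correctly treated as the later (larger) value despite
 * the numeric wrap.
 */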
static inline int entity_before(struct sched_entity *a,
				struct sched_entity *b)
{
	return (s64)(a->vruntime - b->vruntime) < 0;
}

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

static void update_min_vruntime(struct cfs_rq *cfs_rq)
{
	u64 vruntime = cfs_rq->min_vruntime;

	if (cfs_rq->curr)
		vruntime = cfs_rq->curr->vruntime;

	if (cfs_rq->rb_leftmost) {
		struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
						   struct sched_entity,
						   run_node);

		if (!cfs_rq->curr)
			vruntime = se->vruntime;
		else
			vruntime = min_vruntime(vruntime, se->vruntime);
	}

	cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
#ifndef CONFIG_64BIT
	smp_wmb();
	cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
#endif
}
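/*
 * On 32-bit, the min_vruntime/min_vruntime_copy pair above acts roughly
 * like a tiny seqlock: the write barrier orders the two stores, and a
 * lockless reader (see task_waking_fair() below) re-reads until both
 * halves match, so it never observes a torn 64-bit value.
 */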
/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node) {
		struct rb_node *next_node;

		next_node = rb_next(&se->run_node);
		cfs_rq->rb_leftmost = next_node;
	}

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *left = cfs_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_entity, run_node);
}

static struct sched_entity *__pick_next_entity(struct sched_entity *se)
{
	struct rb_node *next = rb_next(&se->run_node);

	if (!next)
		return NULL;

	return rb_entry(next, struct sched_entity, run_node);
}

#ifdef CONFIG_SCHED_DEBUG
static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);

	if (!last)
		return NULL;

	return rb_entry(last, struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

int sched_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int factor = get_update_sysctl_factor();

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

#define WRT_SYSCTL(name) \
	(normalized_sysctl_##name = sysctl_##name / (factor))
	WRT_SYSCTL(sched_min_granularity);
	WRT_SYSCTL(sched_latency);
	WRT_SYSCTL(sched_wakeup_granularity);
#undef WRT_SYSCTL

	return 0;
}
#endif

/*
 * delta /= w
 */
static inline unsigned long
calc_delta_fair(unsigned long delta, struct sched_entity *se)
{
	if (unlikely(se->load.weight != NICE_0_LOAD))
		delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);

	return delta;
}
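/*
 * Illustration (assuming NICE_0_LOAD == 1024): a nice-0 task accrues
 * vruntime 1:1 with wall time, while an entity of weight 2048 accrues
 * it at roughly half rate, since
 * calc_delta_mine(delta, 1024, {weight = 2048}) ~= delta * 1024 / 2048.
 */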
/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (more than sched_nr_latency) we have to
 * stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period = sysctl_sched_min_granularity;
		period *= nr_running;
	}

	return period;
}
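/*
 * With the (unscaled) defaults, l = 6ms and nl = 8: 4 runnable tasks
 * share a 6ms period, while 20 runnable tasks stretch the period to
 * 0.75ms * 20 = 15ms so that no slice drops below the minimum
 * granularity.
 */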
/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*P[w/rw]
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;

		if (unlikely(!se->on_rq)) {
			lw = cfs_rq->load;

			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		slice = calc_delta_mine(slice, se->load.weight, load);
	}
	return slice;
}
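/*
 * Example (flat hierarchy): two runnable tasks of weights 1024 and 2048
 * share a 6ms period; their wall-time slices come out to roughly
 * 6ms * 1024/3072 = 2ms and 6ms * 2048/3072 = 4ms respectively.
 */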
/*
 * We calculate the vruntime slice of a to-be-inserted task.
 *
 * vs = s/w
 */
static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return calc_delta_fair(sched_slice(cfs_rq, se), se);
}

static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
static void update_cfs_shares(struct cfs_rq *cfs_rq);

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;

	schedstat_set(curr->statistics.exec_max,
		      max((u64)delta_exec, curr->statistics.exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = calc_delta_fair(delta_exec, curr);

	curr->vruntime += delta_exec_weighted;
	update_min_vruntime(cfs_rq);

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
	cfs_rq->load_unacc_exec_time += delta_exec;
#endif
}

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock_task;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);
	if (!delta_exec)
		return;

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;

	if (entity_is_task(curr)) {
		struct task_struct *curtask = task_of(curr);

		trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
		cpuacct_charge(curtask, delta_exec);
		account_group_exec_runtime(curtask, delta_exec);
	}
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
			rq_of(cfs_rq)->clock - se->statistics.wait_start));
	schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
	schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
#ifdef CONFIG_SCHEDSTATS
	if (entity_is_task(se)) {
		trace_sched_stat_wait(task_of(se),
			rq_of(cfs_rq)->clock - se->statistics.wait_start);
	}
#endif
	schedstat_set(se->statistics.wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock_task;
}

/**************************************************
 * Scheduling class queueing methods:
 */

#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
static void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
	cfs_rq->task_weight += weight;
}
#else
static inline void
add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
{
}
#endif

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		inc_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, se->load.weight);
		list_add(&se->group_node, &cfs_rq->tasks);
	}
	cfs_rq->nr_running++;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	if (!parent_entity(se))
		dec_cpu_load(rq_of(cfs_rq), se->load.weight);
	if (entity_is_task(se)) {
		add_cfs_task_weight(cfs_rq, -se->load.weight);
		list_del_init(&se->group_node);
	}
	cfs_rq->nr_running--;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
# ifdef CONFIG_SMP
static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
					    int global_update)
{
	struct task_group *tg = cfs_rq->tg;
	long load_avg;

	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period + 1);
	load_avg -= cfs_rq->load_contribution;

	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
		atomic_add(load_avg, &tg->load_weight);
		cfs_rq->load_contribution += load_avg;
	}
}

static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
	u64 period = sysctl_sched_shares_window;
	u64 now, delta;
	unsigned long load = cfs_rq->load.weight;

	if (cfs_rq->tg == &root_task_group)
		return;

	now = rq_of(cfs_rq)->clock_task;
	delta = now - cfs_rq->load_stamp;

	/* truncate load history at 4 idle periods */
	if (cfs_rq->load_stamp > cfs_rq->load_last &&
	    now - cfs_rq->load_last > 4 * period) {
		cfs_rq->load_period = 0;
		cfs_rq->load_avg = 0;
		delta = period - 1;
	}

	cfs_rq->load_stamp = now;
	cfs_rq->load_unacc_exec_time = 0;
	cfs_rq->load_period += delta;
	if (load) {
		cfs_rq->load_last = now;
		cfs_rq->load_avg += delta * load;
	}

	/* consider updating load contribution on each fold or truncate */
	if (global_update || cfs_rq->load_period > period
	    || !cfs_rq->load_period)
		update_cfs_rq_load_contribution(cfs_rq, global_update);

	while (cfs_rq->load_period > period) {
		/*
		 * Inline assembly required to prevent the compiler
		 * optimising this loop into a divmod call.
		 * See __iter_div_u64_rem() for another example of this.
		 */
		asm("" : "+rm" (cfs_rq->load_period));
		cfs_rq->load_period /= 2;
		cfs_rq->load_avg /= 2;
	}

	if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
		list_del_leaf_cfs_rq(cfs_rq);
}

static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	long load_weight, load, shares;

	load = cfs_rq->load.weight;

	load_weight = atomic_read(&tg->load_weight);
	load_weight += load;
	load_weight -= cfs_rq->load_contribution;

	shares = (tg->shares * load);
	if (load_weight)
		shares /= load_weight;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	if (shares > tg->shares)
		shares = tg->shares;

	return shares;
}
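/*
 * Rough example: a group with tg->shares = 1024 whose runqueue on this
 * cpu carries half of the group's total load ends up with about
 * 1024 * load / (2 * load) = 512 shares here, clamped to the
 * [MIN_SHARES, tg->shares] range.
 */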
static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
	if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
		update_cfs_load(cfs_rq, 0);
		update_cfs_shares(cfs_rq);
	}
}
# else /* CONFIG_SMP */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}

static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
	return tg->shares;
}

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
# endif /* CONFIG_SMP */

static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
			    unsigned long weight)
{
	if (se->on_rq) {
		/* commit outstanding execution time */
		if (cfs_rq->curr == se)
			update_curr(cfs_rq);
		account_entity_dequeue(cfs_rq, se);
	}

	update_load_set(&se->load, weight);

	if (se->on_rq)
		account_entity_enqueue(cfs_rq, se);
}

static void update_cfs_shares(struct cfs_rq *cfs_rq)
{
	struct task_group *tg;
	struct sched_entity *se;
	long shares;

	tg = cfs_rq->tg;
	se = tg->se[cpu_of(rq_of(cfs_rq))];
	if (!se)
		return;
#ifndef CONFIG_SMP
	if (likely(se->load.weight == tg->shares))
		return;
#endif
	shares = calc_cfs_shares(cfs_rq, tg);

	reweight_entity(cfs_rq_of(se), se, shares);
}
#else /* CONFIG_FAIR_GROUP_SCHED */
static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
{
}

static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
{
}

static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
{
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	struct task_struct *tsk = NULL;

	if (entity_is_task(se))
		tsk = task_of(se);

	if (se->statistics.sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->statistics.sleep_max))
			se->statistics.sleep_max = delta;

		se->statistics.sleep_start = 0;
		se->statistics.sum_sleep_runtime += delta;

		if (tsk) {
			account_scheduler_latency(tsk, delta >> 10, 1);
			trace_sched_stat_sleep(tsk, delta);
		}
	}
	if (se->statistics.block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->statistics.block_max))
			se->statistics.block_max = delta;

		se->statistics.block_start = 0;
		se->statistics.sum_sleep_runtime += delta;

		if (tsk) {
			if (tsk->in_iowait) {
				se->statistics.iowait_sum += delta;
				se->statistics.iowait_count++;
				trace_sched_stat_iowait(tsk, delta);
			}

			/*
			 * Blocking time is in units of nanosecs, so shift by
			 * 20 to get a milliseconds-range estimation of the
			 * amount of time that the task spent sleeping:
			 */
			if (unlikely(prof_on == SLEEP_PROFILING)) {
				profile_hits(SLEEP_PROFILING,
						(void *)get_wchan(tsk),
						delta >> 20);
			}
			account_scheduler_latency(tsk, delta >> 10, 0);
		}
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime = cfs_rq->min_vruntime;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice(cfs_rq, se);

	/* sleeps up to a single latency don't count. */
	if (!initial) {
		unsigned long thresh = sysctl_sched_latency;

		/*
		 * Halve their sleep time's effect, to allow
		 * for a gentler effect of sleepers:
		 */
		if (sched_feat(GENTLE_FAIR_SLEEPERS))
			thresh >>= 1;

		vruntime -= thresh;
	}

	/* ensure we never gain time by being placed backwards. */
	vruntime = max_vruntime(se->vruntime, vruntime);

	se->vruntime = vruntime;
}
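/*
 * Example: with GENTLE_FAIR_SLEEPERS and a 6ms latency target, a task
 * waking from sleep is placed at min_vruntime - 3ms, so it gets a
 * bounded head start over the queue instead of cashing in its entire
 * sleep time; a forked task under START_DEBIT instead starts one
 * vslice behind.
 */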
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	/*
	 * Update the normalized vruntime before updating min_vruntime
	 * through calling update_curr().
	 */
	if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
		se->vruntime += cfs_rq->min_vruntime;

	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);
	update_cfs_load(cfs_rq, 0);
	account_entity_enqueue(cfs_rq, se);
	update_cfs_shares(cfs_rq);

	if (flags & ENQUEUE_WAKEUP) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
	se->on_rq = 1;

	if (cfs_rq->nr_running == 1)
		list_add_leaf_cfs_rq(cfs_rq);
}

static void __clear_buddies_last(struct sched_entity *se)
{
	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);
		if (cfs_rq->last == se)
			cfs_rq->last = NULL;
		else
			break;
	}
}

static void __clear_buddies_next(struct sched_entity *se)
{
	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);
		if (cfs_rq->next == se)
			cfs_rq->next = NULL;
		else
			break;
	}
}

static void __clear_buddies_skip(struct sched_entity *se)
{
	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);
		if (cfs_rq->skip == se)
			cfs_rq->skip = NULL;
		else
			break;
	}
}

static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->last == se)
		__clear_buddies_last(se);

	if (cfs_rq->next == se)
		__clear_buddies_next(se);

	if (cfs_rq->skip == se)
		__clear_buddies_skip(se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (flags & DEQUEUE_SLEEP) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->statistics.sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->statistics.block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	clear_buddies(cfs_rq, se);

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	se->on_rq = 0;
	update_cfs_load(cfs_rq, 0);
	account_entity_dequeue(cfs_rq, se);

	/*
	 * Normalize the entity after updating the min_vruntime because the
	 * update can refer to the ->curr item and we need to reflect this
	 * movement in our normalized position.
	 */
	if (!(flags & DEQUEUE_SLEEP))
		se->vruntime -= cfs_rq->min_vruntime;

	update_min_vruntime(cfs_rq);
	update_cfs_shares(cfs_rq);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime) {
		resched_task(rq_of(cfs_rq)->curr);
		/*
		 * The current task ran long enough, ensure it doesn't get
		 * re-elected due to buddy favours.
		 */
		clear_buddies(cfs_rq, curr);
		return;
	}

	/*
	 * Ensure that a task that missed wakeup preemption by a
	 * narrow margin doesn't have to wait for a full slice.
	 * This also mitigates buddy induced latencies under load.
	 */
	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	if (delta_exec < sysctl_sched_min_granularity)
		return;

	if (cfs_rq->nr_running > 1) {
		struct sched_entity *se = __pick_first_entity(cfs_rq);
		s64 delta = curr->vruntime - se->vruntime;

		if (delta < 0)
			return;

		if (delta > ideal_runtime)
			resched_task(rq_of(cfs_rq)->curr);
	}
}
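/*
 * Example: with an ideal_runtime of 3ms, a task that has already run
 * 4ms since it was picked is rescheduled immediately; one that has run
 * only 1ms keeps the CPU unless the leftmost waiter's vruntime already
 * trails curr's by more than 3ms.
 */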
  929. static void
  930. set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
  931. {
  932. /* 'current' is not kept within the tree. */
  933. if (se->on_rq) {
  934. /*
  935. * Any task has to be enqueued before it get to execute on
  936. * a CPU. So account for the time it spent waiting on the
  937. * runqueue.
  938. */
  939. update_stats_wait_end(cfs_rq, se);
  940. __dequeue_entity(cfs_rq, se);
  941. }
  942. update_stats_curr_start(cfs_rq, se);
  943. cfs_rq->curr = se;
  944. #ifdef CONFIG_SCHEDSTATS
  945. /*
  946. * Track our maximum slice length, if the CPU's load is at
  947. * least twice that of our own weight (i.e. dont track it
  948. * when there are only lesser-weight tasks around):
  949. */
  950. if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
  951. se->statistics.slice_max = max(se->statistics.slice_max,
  952. se->sum_exec_runtime - se->prev_sum_exec_runtime);
  953. }
  954. #endif
  955. se->prev_sum_exec_runtime = se->sum_exec_runtime;
  956. }
  957. static int
  958. wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
  959. /*
  960. * Pick the next process, keeping these things in mind, in this order:
  961. * 1) keep things fair between processes/task groups
  962. * 2) pick the "next" process, since someone really wants that to run
  963. * 3) pick the "last" process, for cache locality
  964. * 4) do not run the "skip" process, if something else is available
  965. */
  966. static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
  967. {
  968. struct sched_entity *se = __pick_first_entity(cfs_rq);
  969. struct sched_entity *left = se;
  970. /*
  971. * Avoid running the skip buddy, if running something else can
  972. * be done without getting too unfair.
  973. */
  974. if (cfs_rq->skip == se) {
  975. struct sched_entity *second = __pick_next_entity(se);
  976. if (second && wakeup_preempt_entity(second, left) < 1)
  977. se = second;
  978. }
  979. /*
  980. * Prefer last buddy, try to return the CPU to a preempted task.
  981. */
  982. if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
  983. se = cfs_rq->last;
  984. /*
  985. * Someone really wants this to run. If it's not unfair, run it.
  986. */
  987. if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
  988. se = cfs_rq->next;
  989. clear_buddies(cfs_rq, se);
  990. return se;
  991. }
  992. static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
  993. {
  994. /*
  995. * If still on the runqueue then deactivate_task()
  996. * was not called and update_curr() has to be done:
  997. */
  998. if (prev->on_rq)
  999. update_curr(cfs_rq);
  1000. check_spread(cfs_rq, prev);
  1001. if (prev->on_rq) {
  1002. update_stats_wait_start(cfs_rq, prev);
  1003. /* Put 'current' back into the tree. */
  1004. __enqueue_entity(cfs_rq, prev);
  1005. }
  1006. cfs_rq->curr = NULL;
  1007. }
  1008. static void
  1009. entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
  1010. {
  1011. /*
  1012. * Update run-time statistics of the 'current'.
  1013. */
  1014. update_curr(cfs_rq);
  1015. /*
  1016. * Update share accounting for long-running entities.
  1017. */
  1018. update_entity_shares_tick(cfs_rq);
  1019. #ifdef CONFIG_SCHED_HRTICK
  1020. /*
  1021. * queued ticks are scheduled to match the slice, so don't bother
  1022. * validating it and just reschedule.
  1023. */
  1024. if (queued) {
  1025. resched_task(rq_of(cfs_rq)->curr);
  1026. return;
  1027. }
  1028. /*
  1029. * don't let the period tick interfere with the hrtick preemption
  1030. */
  1031. if (!sched_feat(DOUBLE_TICK) &&
  1032. hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
  1033. return;
  1034. #endif
  1035. if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
  1036. check_preempt_tick(cfs_rq, curr);
  1037. }
  1038. /**************************************************
  1039. * CFS operations on tasks:
  1040. */
  1041. #ifdef CONFIG_SCHED_HRTICK
  1042. static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
  1043. {
  1044. struct sched_entity *se = &p->se;
  1045. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  1046. WARN_ON(task_rq(p) != rq);
  1047. if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
  1048. u64 slice = sched_slice(cfs_rq, se);
  1049. u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
  1050. s64 delta = slice - ran;
  1051. if (delta < 0) {
  1052. if (rq->curr == p)
  1053. resched_task(p);
  1054. return;
  1055. }
  1056. /*
  1057. * Don't schedule slices shorter than 10000ns, that just
  1058. * doesn't make sense. Rely on vruntime for fairness.
  1059. */
  1060. if (rq->curr != p)
  1061. delta = max_t(s64, 10000LL, delta);
  1062. hrtick_start(rq, delta);
  1063. }
  1064. }
  1065. /*
  1066. * called from enqueue/dequeue and updates the hrtick when the
  1067. * current task is from our class and nr_running is low enough
  1068. * to matter.
  1069. */
  1070. static void hrtick_update(struct rq *rq)
  1071. {
  1072. struct task_struct *curr = rq->curr;
  1073. if (curr->sched_class != &fair_sched_class)
  1074. return;
  1075. if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
  1076. hrtick_start_fair(rq, curr);
  1077. }
  1078. #else /* !CONFIG_SCHED_HRTICK */
  1079. static inline void
  1080. hrtick_start_fair(struct rq *rq, struct task_struct *p)
  1081. {
  1082. }
  1083. static inline void hrtick_update(struct rq *rq)
  1084. {
  1085. }
  1086. #endif
  1087. /*
  1088. * The enqueue_task method is called before nr_running is
  1089. * increased. Here we update the fair scheduling stats and
  1090. * then put the task into the rbtree:
  1091. */
  1092. static void
  1093. enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
  1094. {
  1095. struct cfs_rq *cfs_rq;
  1096. struct sched_entity *se = &p->se;
  1097. for_each_sched_entity(se) {
  1098. if (se->on_rq)
  1099. break;
  1100. cfs_rq = cfs_rq_of(se);
  1101. enqueue_entity(cfs_rq, se, flags);
  1102. flags = ENQUEUE_WAKEUP;
  1103. }
  1104. for_each_sched_entity(se) {
  1105. struct cfs_rq *cfs_rq = cfs_rq_of(se);
  1106. update_cfs_load(cfs_rq, 0);
  1107. update_cfs_shares(cfs_rq);
  1108. }
  1109. hrtick_update(rq);
  1110. }
static void set_next_buddy(struct sched_entity *se);

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;
	int task_sleep = flags & DEQUEUE_SLEEP;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, flags);

		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight) {
			/*
			 * Bias pick_next to pick a task from this cfs_rq, as
			 * p is sleeping when it is within its sched_slice.
			 */
			if (task_sleep && parent_entity(se))
				set_next_buddy(parent_entity(se));

			/* avoid re-evaluating load for this entity */
			se = parent_entity(se);
			break;
		}
		flags |= DEQUEUE_SLEEP;
	}

	for_each_sched_entity(se) {
		struct cfs_rq *cfs_rq = cfs_rq_of(se);

		update_cfs_load(cfs_rq, 0);
		update_cfs_shares(cfs_rq);
	}

	hrtick_update(rq);
}

#ifdef CONFIG_SMP

static void task_waking_fair(struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);
	u64 min_vruntime;

#ifndef CONFIG_64BIT
	u64 min_vruntime_copy;

	do {
		min_vruntime_copy = cfs_rq->min_vruntime_copy;
		smp_rmb();
		min_vruntime = cfs_rq->min_vruntime;
	} while (min_vruntime != min_vruntime_copy);
#else
	min_vruntime = cfs_rq->min_vruntime;
#endif

	se->vruntime -= min_vruntime;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * effective_load() calculates the load change as seen from the root_task_group
 *
 * Adding load to a group doesn't make a group heavier, but can cause movement
 * of group shares between cpus. Assuming the shares were perfectly aligned one
 * can calculate the shift in shares.
 */
static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
	struct sched_entity *se = tg->se[cpu];

	if (!tg->parent)
		return wl;

	for_each_sched_entity(se) {
		long lw, w;

		tg = se->my_q->tg;
		w = se->my_q->load.weight;

		/* use this cpu's instantaneous contribution */
		lw = atomic_read(&tg->load_weight);
		lw -= se->my_q->load_contribution;
		lw += w + wg;

		wl += w;

		if (lw > 0 && wl < lw)
			wl = (wl * tg->shares) / lw;
		else
			wl = tg->shares;

		/* zero point is MIN_SHARES */
		if (wl < MIN_SHARES)
			wl = MIN_SHARES;
		wl -= se->load.weight;
		wg = 0;
	}

	return wl;
}
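
/*
 * Illustrative walk through one level (numbers assumed, not from the
 * original source): with tg->shares == 1024, this cpu's group runqueue
 * weight w == 512, an incoming delta wl == 512 and a global group
 * weight lw == 2048, the cpu's new share is (512 + 512) * 1024 / 2048
 * == 512; if the group se currently weighs 256, the delta propagated
 * to the next level up is 512 - 256 == 256.
 */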
#else

static inline unsigned long effective_load(struct task_group *tg, int cpu,
		unsigned long wl, unsigned long wg)
{
	return wl;
}

#endif

static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
{
	s64 this_load, load;
	int idx, this_cpu, prev_cpu;
	unsigned long tl_per_task;
	struct task_group *tg;
	unsigned long weight;
	int balanced;

	idx = sd->wake_idx;
	this_cpu = smp_processor_id();
	prev_cpu = task_cpu(p);
	load = source_load(prev_cpu, idx);
	this_load = target_load(this_cpu, idx);

	/*
	 * If sync wakeup then subtract the (maximum possible)
	 * effect of the currently running task from the load
	 * of the current CPU:
	 */
	if (sync) {
		tg = task_group(current);
		weight = current->se.load.weight;

		this_load += effective_load(tg, this_cpu, -weight, -weight);
		load += effective_load(tg, prev_cpu, 0, -weight);
	}

	tg = task_group(p);
	weight = p->se.load.weight;

	/*
	 * In low-load situations, where prev_cpu is idle and this_cpu is idle
	 * due to the sync cause above having dropped this_load to 0, we'll
	 * always have an imbalance, but there's really nothing you can do
	 * about that, so that's good too.
	 *
	 * Otherwise check if either cpu is near enough in load to allow this
	 * task to be woken on this_cpu.
	 */
	if (this_load > 0) {
		s64 this_eff_load, prev_eff_load;

		this_eff_load = 100;
		this_eff_load *= power_of(prev_cpu);
		this_eff_load *= this_load +
			effective_load(tg, this_cpu, weight, weight);

		prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
		prev_eff_load *= power_of(this_cpu);
		prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);

		balanced = this_eff_load <= prev_eff_load;
	} else
		balanced = true;

	/*
	 * If the currently running task will sleep within
	 * a reasonable amount of time then attract this newly
	 * woken task:
	 */
	if (sync && balanced)
		return 1;

	schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
	tl_per_task = cpu_avg_load_per_task(this_cpu);

	if (balanced ||
	    (this_load <= load &&
	     this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
		/*
		 * This domain has SD_WAKE_AFFINE and
		 * p is cache cold in this domain, and
		 * there is no bad imbalance.
		 */
		schedstat_inc(sd, ttwu_move_affine);
		schedstat_inc(p, se.statistics.nr_wakeups_affine);

		return 1;
	}
	return 0;
}
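
/*
 * Note on the balanced check above: instead of dividing each side by
 * its cpu power, the two sides are cross-multiplied, avoiding a
 * division and staying in integer arithmetic. Illustrative numbers
 * (assumed): with imbalance_pct == 125 and equal cpu power, the local
 * factor is 100 against a remote factor of 112, so the affine wakeup
 * is accepted while this_cpu's effective load stays within ~12% of
 * prev_cpu's.
 */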
/*
 * find_idlest_group finds and returns the least busy CPU group within the
 * domain.
 */
static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
		  int this_cpu, int load_idx)
{
	struct sched_group *idlest = NULL, *group = sd->groups;
	unsigned long min_load = ULONG_MAX, this_load = 0;
	int imbalance = 100 + (sd->imbalance_pct-100)/2;

	do {
		unsigned long load, avg_load;
		int local_group;
		int i;

		/* Skip over this group if it has no CPUs allowed */
		if (!cpumask_intersects(sched_group_cpus(group),
					&p->cpus_allowed))
			continue;

		local_group = cpumask_test_cpu(this_cpu,
					       sched_group_cpus(group));

		/* Tally up the load of all CPUs in the group */
		avg_load = 0;

		for_each_cpu(i, sched_group_cpus(group)) {
			/* Bias balancing toward cpus of our domain */
			if (local_group)
				load = source_load(i, load_idx);
			else
				load = target_load(i, load_idx);

			avg_load += load;
		}

		/* Adjust by relative CPU power of the group */
		avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;

		if (local_group) {
			this_load = avg_load;
		} else if (avg_load < min_load) {
			min_load = avg_load;
			idlest = group;
		}
	} while (group = group->next, group != sd->groups);

	if (!idlest || 100*this_load < imbalance*min_load)
		return NULL;
	return idlest;
}

/*
 * find_idlest_cpu - find the idlest cpu among the cpus in group.
 */
static int
find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
{
	unsigned long load, min_load = ULONG_MAX;
	int idlest = -1;
	int i;

	/* Traverse only the allowed CPUs */
	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
		load = weighted_cpuload(i);

		if (load < min_load || (load == min_load && i == this_cpu)) {
			min_load = load;
			idlest = i;
		}
	}

	return idlest;
}

/*
 * Try and locate an idle CPU in the sched_domain.
 */
static int select_idle_sibling(struct task_struct *p, int target)
{
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	struct sched_domain *sd;
	int i;

	/*
	 * If the task is going to be woken-up on this cpu and if it is
	 * already idle, then it is the right target.
	 */
	if (target == cpu && idle_cpu(cpu))
		return cpu;

	/*
	 * If the task is going to be woken-up on the cpu where it previously
	 * ran and if it is currently idle, then it is the right target.
	 */
	if (target == prev_cpu && idle_cpu(prev_cpu))
		return prev_cpu;

	/*
	 * Otherwise, iterate the domains and find an eligible idle cpu.
	 */
	rcu_read_lock();
	for_each_domain(target, sd) {
		if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
			break;

		for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
			if (idle_cpu(i)) {
				target = i;
				break;
			}
		}

		/*
		 * Let's stop looking for an idle sibling when we reached
		 * the domain that spans the current cpu and prev_cpu.
		 */
		if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
			break;
	}
	rcu_read_unlock();

	return target;
}
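
/*
 * Note: the SD_SHARE_PKG_RESOURCES test above bounds the search to
 * domains that share a cache (SMT siblings, cores on one package), so
 * a wakeup is only redirected to an idle cpu that is cache-affine with
 * the intended target; wider domains are never scanned here.
 */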
/*
 * select_task_rq_fair: balance the current task (running on cpu) in domains
 * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
 * SD_BALANCE_EXEC.
 *
 * Balance, ie. select the least loaded group.
 *
 * Returns the target CPU number, or the same CPU if no balancing is needed.
 *
 * preempt must be disabled.
 */
static int
select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
{
	struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
	int cpu = smp_processor_id();
	int prev_cpu = task_cpu(p);
	int new_cpu = cpu;
	int want_affine = 0;
	int want_sd = 1;
	int sync = wake_flags & WF_SYNC;

	if (sd_flag & SD_BALANCE_WAKE) {
		if (cpumask_test_cpu(cpu, &p->cpus_allowed))
			want_affine = 1;
		new_cpu = prev_cpu;
	}

	rcu_read_lock();
	for_each_domain(cpu, tmp) {
		if (!(tmp->flags & SD_LOAD_BALANCE))
			continue;

		/*
		 * If power savings logic is enabled for a domain, see if we
		 * are not overloaded, if so, don't balance wider.
		 */
		if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
			unsigned long power = 0;
			unsigned long nr_running = 0;
			unsigned long capacity;
			int i;

			for_each_cpu(i, sched_domain_span(tmp)) {
				power += power_of(i);
				nr_running += cpu_rq(i)->cfs.nr_running;
			}

			capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);

			if (tmp->flags & SD_POWERSAVINGS_BALANCE)
				nr_running /= 2;

			if (nr_running < capacity)
				want_sd = 0;
		}

		/*
		 * If both cpu and prev_cpu are part of this domain,
		 * cpu is a valid SD_WAKE_AFFINE target.
		 */
		if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
		    cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
			affine_sd = tmp;
			want_affine = 0;
		}

		if (!want_sd && !want_affine)
			break;

		if (!(tmp->flags & sd_flag))
			continue;

		if (want_sd)
			sd = tmp;
	}

	if (affine_sd) {
		if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
			prev_cpu = cpu;

		new_cpu = select_idle_sibling(p, prev_cpu);
		goto unlock;
	}

	while (sd) {
		int load_idx = sd->forkexec_idx;
		struct sched_group *group;
		int weight;

		if (!(sd->flags & sd_flag)) {
			sd = sd->child;
			continue;
		}

		if (sd_flag & SD_BALANCE_WAKE)
			load_idx = sd->wake_idx;

		group = find_idlest_group(sd, p, cpu, load_idx);
		if (!group) {
			sd = sd->child;
			continue;
		}

		new_cpu = find_idlest_cpu(group, p, cpu);
		if (new_cpu == -1 || new_cpu == cpu) {
			/* Now try balancing at a lower domain level of cpu */
			sd = sd->child;
			continue;
		}

		/* Now try balancing at a lower domain level of new_cpu */
		cpu = new_cpu;
		weight = sd->span_weight;
		sd = NULL;
		for_each_domain(cpu, tmp) {
			if (weight <= tmp->span_weight)
				break;
			if (tmp->flags & sd_flag)
				sd = tmp;
		}
		/* while loop will break here if sd == NULL */
	}
unlock:
	rcu_read_unlock();

	return new_cpu;
}
#endif /* CONFIG_SMP */

static unsigned long
wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
{
	unsigned long gran = sysctl_sched_wakeup_granularity;

	/*
	 * Since it is curr that is running now, convert the gran from
	 * real-time to virtual-time in its units.
	 *
	 * By using 'se' instead of 'curr' we penalize light tasks, so
	 * they get preempted easier. That is, if 'se' < 'curr' then
	 * the resulting gran will be larger, therefore penalizing the
	 * lighter, if otoh 'se' > 'curr' then the resulting gran will
	 * be smaller, again penalizing the lighter task.
	 *
	 * This is especially important for buddies when the leftmost
	 * task is higher priority than the buddy.
	 */
	return calc_delta_fair(gran, se);
}
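
/*
 * Worked example (assuming NICE_0_LOAD == 1024 and a 1 ms wakeup
 * granularity): calc_delta_fair() scales gran by NICE_0_LOAD over
 * se->load.weight, so a nice-0 'se' keeps a 1 ms gran while a lighter
 * se of weight 512 gets a 2 ms gran -- the lighter task must lag twice
 * as far behind in vruntime before it is allowed to preempt.
 */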
/*
 * Should 'se' preempt 'curr'.
 *
 *             |s1
 *        |s2
 *   |s3
 *         g
 *      |<--->|c
 *
 *  w(c, s1) = -1
 *  w(c, s2) =  0
 *  w(c, s3) =  1
 *
 */
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
{
	s64 gran, vdiff = curr->vruntime - se->vruntime;

	if (vdiff <= 0)
		return -1;

	gran = wakeup_gran(curr, se);
	if (vdiff > gran)
		return 1;

	return 0;
}

static void set_last_buddy(struct sched_entity *se)
{
	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
		return;

	for_each_sched_entity(se)
		cfs_rq_of(se)->last = se;
}

static void set_next_buddy(struct sched_entity *se)
{
	if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
		return;

	for_each_sched_entity(se)
		cfs_rq_of(se)->next = se;
}

static void set_skip_buddy(struct sched_entity *se)
{
	for_each_sched_entity(se)
		cfs_rq_of(se)->skip = se;
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
{
	struct task_struct *curr = rq->curr;
	struct sched_entity *se = &curr->se, *pse = &p->se;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	int scale = cfs_rq->nr_running >= sched_nr_latency;
	int next_buddy_marked = 0;

	if (unlikely(se == pse))
		return;

	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
		set_next_buddy(pse);
		next_buddy_marked = 1;
	}

	/*
	 * We can come here with TIF_NEED_RESCHED already set from new task
	 * wake up path.
	 */
	if (test_tsk_need_resched(curr))
		return;

	/* Idle tasks are by definition preempted by non-idle tasks. */
	if (unlikely(curr->policy == SCHED_IDLE) &&
	    likely(p->policy != SCHED_IDLE))
		goto preempt;

	/*
	 * Batch and idle tasks do not preempt non-idle tasks (their preemption
	 * is driven by the tick):
	 */
	if (unlikely(p->policy != SCHED_NORMAL))
		return;

	if (!sched_feat(WAKEUP_PREEMPT))
		return;

	find_matching_se(&se, &pse);
	update_curr(cfs_rq_of(se));
	BUG_ON(!pse);
	if (wakeup_preempt_entity(se, pse) == 1) {
		/*
		 * Bias pick_next to pick the sched entity that is
		 * triggering this preemption.
		 */
		if (!next_buddy_marked)
			set_next_buddy(pse);
		goto preempt;
	}

	return;

preempt:
	resched_task(curr);
	/*
	 * Only set the backward buddy when the current task is still
	 * on the rq. This can happen when a wakeup gets interleaved
	 * with schedule on the ->pre_schedule() or idle_balance()
	 * point, either of which can drop the rq lock.
	 *
	 * Also, during early boot the idle thread is in the fair class,
	 * for obvious reasons it's a bad idea to schedule back to it.
	 */
	if (unlikely(!se->on_rq || curr == rq->idle))
		return;

	if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
		set_last_buddy(se);
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct task_struct *p;
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (!cfs_rq->nr_running)
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	p = task_of(se);
	hrtick_start_fair(rq, p);

	return p;
}
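
/*
 * Note on the loop above: with CONFIG_FAIR_GROUP_SCHED a picked entity
 * may itself be a group; group_cfs_rq() then returns that group's own
 * runqueue and the walk descends until it reaches a real task, whose
 * group_cfs_rq() is NULL. Without group scheduling the loop runs once.
 */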
/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

/*
 * sched_yield() is very simple
 *
 * The magic of dealing with the ->skip buddy is in pick_next_entity.
 */
static void yield_task_fair(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *se = &curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(rq->nr_running == 1))
		return;

	clear_buddies(cfs_rq, se);

	if (curr->policy != SCHED_BATCH) {
		update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);
	}

	set_skip_buddy(se);
}

static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
{
	struct sched_entity *se = &p->se;

	if (!se->on_rq)
		return false;

	/* Tell the scheduler that we'd really like pse to run next. */
	set_next_buddy(se);

	yield_task_fair(rq);

	return true;
}

#ifdef CONFIG_SMP
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * pull_task - move a task from a remote runqueue to the local runqueue.
 * Both runqueues must be locked.
 */
static void pull_task(struct rq *src_rq, struct task_struct *p,
		      struct rq *this_rq, int this_cpu)
{
	deactivate_task(src_rq, p, 0);
	set_task_cpu(p, this_cpu);
	activate_task(this_rq, p, 0);
	check_preempt_curr(this_rq, p, 0);
}

/*
 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
 */
static
int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
		     struct sched_domain *sd, enum cpu_idle_type idle,
		     int *all_pinned)
{
	int tsk_cache_hot = 0;
	/*
	 * We do not migrate tasks that are:
	 * 1) running (obviously), or
	 * 2) cannot be migrated to this CPU due to cpus_allowed, or
	 * 3) are cache-hot on their current CPU.
	 */
	if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
		return 0;
	}
	*all_pinned = 0;

	if (task_running(rq, p)) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_running);
		return 0;
	}

	/*
	 * Aggressive migration if:
	 * 1) task is cache cold, or
	 * 2) too many balance attempts have failed.
	 */
	tsk_cache_hot = task_hot(p, rq->clock_task, sd);
	if (!tsk_cache_hot ||
	    sd->nr_balance_failed > sd->cache_nice_tries) {
#ifdef CONFIG_SCHEDSTATS
		if (tsk_cache_hot) {
			schedstat_inc(sd, lb_hot_gained[idle]);
			schedstat_inc(p, se.statistics.nr_forced_migrations);
		}
#endif
		return 1;
	}

	if (tsk_cache_hot) {
		schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
		return 0;
	}
	return 1;
}

/*
 * move_one_task tries to move exactly one task from busiest to this_rq, as
 * part of active balancing operations within "domain".
 * Returns 1 if successful and 0 otherwise.
 *
 * Called with both runqueues locked.
 */
static int
move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
	      struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct task_struct *p, *n;
	struct cfs_rq *cfs_rq;
	int pinned = 0;

	for_each_leaf_cfs_rq(busiest, cfs_rq) {
		list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
			if (!can_migrate_task(p, busiest, this_cpu,
					      sd, idle, &pinned))
				continue;

			pull_task(busiest, p, this_rq, this_cpu);
			/*
			 * Right now, this is only the second place pull_task()
			 * is called, so we can safely collect pull_task()
			 * stats here rather than inside pull_task().
			 */
			schedstat_inc(sd, lb_gained[idle]);
			return 1;
		}
	}

	return 0;
}

static unsigned long
balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
	      unsigned long max_load_move, struct sched_domain *sd,
	      enum cpu_idle_type idle, int *all_pinned,
	      struct cfs_rq *busiest_cfs_rq)
{
	int loops = 0, pulled = 0;
	long rem_load_move = max_load_move;
	struct task_struct *p, *n;

	if (max_load_move == 0)
		goto out;

	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
		if (loops++ > sysctl_sched_nr_migrate)
			break;

		if ((p->se.load.weight >> 1) > rem_load_move ||
		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
				      all_pinned))
			continue;

		pull_task(busiest, p, this_rq, this_cpu);
		pulled++;
		rem_load_move -= p->se.load.weight;

#ifdef CONFIG_PREEMPT
		/*
		 * NEWIDLE balancing is a source of latency, so preemptible
		 * kernels will stop after the first task is pulled to minimize
		 * the critical section.
		 */
		if (idle == CPU_NEWLY_IDLE)
			break;
#endif

		/*
		 * We only want to steal up to the prescribed amount of
		 * weighted load.
		 */
		if (rem_load_move <= 0)
			break;
	}
out:
	/*
	 * Right now, this is one of only two places pull_task() is called,
	 * so we can safely collect pull_task() stats here rather than
	 * inside pull_task().
	 */
	schedstat_add(sd, lb_gained[idle], pulled);

	return max_load_move - rem_load_move;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
/*
 * update tg->load_weight by folding this cpu's load_avg
 */
static int update_shares_cpu(struct task_group *tg, int cpu)
{
	struct cfs_rq *cfs_rq;
	unsigned long flags;
	struct rq *rq;

	if (!tg->se[cpu])
		return 0;

	rq = cpu_rq(cpu);
	cfs_rq = tg->cfs_rq[cpu];

	raw_spin_lock_irqsave(&rq->lock, flags);

	update_rq_clock(rq);
	update_cfs_load(cfs_rq, 1);

	/*
	 * We need to update shares after updating tg->load_weight in
	 * order to adjust the weight of groups with long running tasks.
	 */
	update_cfs_shares(cfs_rq);

	raw_spin_unlock_irqrestore(&rq->lock, flags);

	return 0;
}

static void update_shares(int cpu)
{
	struct cfs_rq *cfs_rq;
	struct rq *rq = cpu_rq(cpu);

	rcu_read_lock();
	/*
	 * Iterates the task_group tree in a bottom up fashion, see
	 * list_add_leaf_cfs_rq() for details.
	 */
	for_each_leaf_cfs_rq(rq, cfs_rq)
		update_shares_cpu(cfs_rq->tg, cpu);
	rcu_read_unlock();
}

/*
 * Compute the cpu's hierarchical load factor for each task group.
 * This needs to be done in a top-down fashion because the load of a child
 * group is a fraction of its parent's load.
 */
static int tg_load_down(struct task_group *tg, void *data)
{
	unsigned long load;
	long cpu = (long)data;

	if (!tg->parent) {
		load = cpu_rq(cpu)->load.weight;
	} else {
		load = tg->parent->cfs_rq[cpu]->h_load;
		load *= tg->se[cpu]->load.weight;
		load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
	}

	tg->cfs_rq[cpu]->h_load = load;

	return 0;
}
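
/*
 * Worked example (numbers assumed): if the root runqueue weight is
 * 2048 and a first-level group entity holds weight 1024 of it, that
 * group's h_load is 2048 * 1024 / 2048 == 1024 (approximately -- the
 * "+ 1" above only guards against an empty, zero-weight parent
 * runqueue); a child owning half of that group's queue weight in turn
 * gets h_load 512.
 */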
static void update_h_load(long cpu)
{
	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
}

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned)
{
	long rem_load_move = max_load_move;
	struct cfs_rq *busiest_cfs_rq;

	rcu_read_lock();
	update_h_load(cpu_of(busiest));

	for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
		unsigned long busiest_h_load = busiest_cfs_rq->h_load;
		unsigned long busiest_weight = busiest_cfs_rq->load.weight;
		u64 rem_load, moved_load;

		/*
		 * empty group
		 */
		if (!busiest_cfs_rq->task_weight)
			continue;

		rem_load = (u64)rem_load_move * busiest_weight;
		rem_load = div_u64(rem_load, busiest_h_load + 1);

		moved_load = balance_tasks(this_rq, this_cpu, busiest,
					   rem_load, sd, idle, all_pinned,
					   busiest_cfs_rq);
		if (!moved_load)
			continue;

		moved_load *= busiest_h_load;
		moved_load = div_u64(moved_load, busiest_weight + 1);

		rem_load_move -= moved_load;
		if (rem_load_move < 0)
			break;
	}
	rcu_read_unlock();

	return max_load_move - rem_load_move;
}
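
/*
 * Note on the two div_u64() conversions above: rem_load_move is
 * expressed in hierarchical (root-level) load units, while the tasks
 * on a group runqueue carry local weights. The budget is therefore
 * scaled by load.weight / h_load before calling balance_tasks(), and
 * the amount actually moved is scaled back by h_load / load.weight;
 * the "+ 1" terms avoid division by zero on empty groups.
 */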
#else
static inline void update_shares(int cpu)
{
}

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned)
{
	return balance_tasks(this_rq, this_cpu, busiest,
			     max_load_move, sd, idle, all_pinned,
			     &busiest->cfs);
}
#endif

/*
 * move_tasks tries to move up to max_load_move weighted load from busiest to
 * this_rq, as part of a balancing operation within domain "sd".
 * Returns 1 if successful and 0 otherwise.
 *
 * Called with both runqueues locked.
 */
static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
		      unsigned long max_load_move,
		      struct sched_domain *sd, enum cpu_idle_type idle,
		      int *all_pinned)
{
	unsigned long total_load_moved = 0, load_moved;

	do {
		load_moved = load_balance_fair(this_rq, this_cpu, busiest,
				max_load_move - total_load_moved,
				sd, idle, all_pinned);

		total_load_moved += load_moved;

#ifdef CONFIG_PREEMPT
		/*
		 * NEWIDLE balancing is a source of latency, so preemptible
		 * kernels will stop after the first task is pulled to minimize
		 * the critical section.
		 */
		if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
			break;

		if (raw_spin_is_contended(&this_rq->lock) ||
		    raw_spin_is_contended(&busiest->lock))
			break;
#endif
	} while (load_moved && max_load_move > total_load_moved);

	return total_load_moved > 0;
}

/********** Helpers for find_busiest_group ************************/
/*
 * sd_lb_stats - Structure to store the statistics of a sched_domain
 *		during load balancing.
 */
struct sd_lb_stats {
	struct sched_group *busiest;	/* Busiest group in this sd */
	struct sched_group *this;	/* Local group in this sd */
	unsigned long total_load;	/* Total load of all groups in sd */
	unsigned long total_pwr;	/* Total power of all groups in sd */
	unsigned long avg_load;		/* Average load across all groups in sd */

	/** Statistics of this group */
	unsigned long this_load;
	unsigned long this_load_per_task;
	unsigned long this_nr_running;
	unsigned long this_has_capacity;
	unsigned int  this_idle_cpus;

	/* Statistics of the busiest group */
	unsigned int  busiest_idle_cpus;
	unsigned long max_load;
	unsigned long busiest_load_per_task;
	unsigned long busiest_nr_running;
	unsigned long busiest_group_capacity;
	unsigned long busiest_has_capacity;
	unsigned int  busiest_group_weight;

	int group_imb; /* Is there imbalance in this sd */
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
	int power_savings_balance; /* Is powersave balance needed for this sd */
	struct sched_group *group_min; /* Least loaded group in sd */
	struct sched_group *group_leader; /* Group which relieves group_min */
	unsigned long min_load_per_task; /* load_per_task in group_min */
	unsigned long leader_nr_running; /* Nr running of group_leader */
	unsigned long min_nr_running; /* Nr running of group_min */
#endif
};

/*
 * sg_lb_stats - stats of a sched_group required for load_balancing
 */
struct sg_lb_stats {
	unsigned long avg_load; /* Avg load across the CPUs of the group */
	unsigned long group_load; /* Total load over the CPUs of the group */
	unsigned long sum_nr_running; /* Nr tasks running in the group */
	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
	unsigned long group_capacity;
	unsigned long idle_cpus;
	unsigned long group_weight;
	int group_imb; /* Is there an imbalance in the group ? */
	int group_has_capacity; /* Is there extra capacity in the group? */
};
/**
 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
 * @group: The group whose first cpu is to be returned.
 */
static inline unsigned int group_first_cpu(struct sched_group *group)
{
	return cpumask_first(sched_group_cpus(group));
}

/**
 * get_sd_load_idx - Obtain the load index for a given sched domain.
 * @sd: The sched_domain whose load_idx is to be obtained.
 * @idle: The idle status of the CPU for whose sd load_idx is obtained.
 */
static inline int get_sd_load_idx(struct sched_domain *sd,
				  enum cpu_idle_type idle)
{
	int load_idx;

	switch (idle) {
	case CPU_NOT_IDLE:
		load_idx = sd->busy_idx;
		break;

	case CPU_NEWLY_IDLE:
		load_idx = sd->newidle_idx;
		break;
	default:
		load_idx = sd->idle_idx;
		break;
	}

	return load_idx;
}
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
/**
 * init_sd_power_savings_stats - Initialize power savings statistics for
 * the given sched_domain, during load balancing.
 *
 * @sd: Sched domain whose power-savings statistics are to be initialized.
 * @sds: Variable containing the statistics for sd.
 * @idle: Idle status of the CPU at which we're performing load-balancing.
 */
static inline void init_sd_power_savings_stats(struct sched_domain *sd,
	struct sd_lb_stats *sds, enum cpu_idle_type idle)
{
	/*
	 * Busy processors will not participate in power savings
	 * balance.
	 */
	if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
		sds->power_savings_balance = 0;
	else {
		sds->power_savings_balance = 1;
		sds->min_nr_running = ULONG_MAX;
		sds->leader_nr_running = 0;
	}
}

/**
 * update_sd_power_savings_stats - Update the power saving stats for a
 * sched_domain while performing load balancing.
 *
 * @group: sched_group belonging to the sched_domain under consideration.
 * @sds: Variable containing the statistics of the sched_domain
 * @local_group: Does group contain the CPU for which we're performing
 *		load balancing?
 * @sgs: Variable containing the statistics of the group.
 */
static inline void update_sd_power_savings_stats(struct sched_group *group,
	struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
{
	if (!sds->power_savings_balance)
		return;

	/*
	 * If the local group is idle or completely loaded
	 * no need to do power savings balance at this domain
	 */
	if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
			    !sds->this_nr_running))
		sds->power_savings_balance = 0;

	/*
	 * If a group is already running at full capacity or idle,
	 * don't include that group in power savings calculations
	 */
	if (!sds->power_savings_balance ||
	    sgs->sum_nr_running >= sgs->group_capacity ||
	    !sgs->sum_nr_running)
		return;

	/*
	 * Calculate the group which has the least non-idle load.
	 * This is the group from where we need to pick up the load
	 * for saving power
	 */
	if ((sgs->sum_nr_running < sds->min_nr_running) ||
	    (sgs->sum_nr_running == sds->min_nr_running &&
	     group_first_cpu(group) > group_first_cpu(sds->group_min))) {
		sds->group_min = group;
		sds->min_nr_running = sgs->sum_nr_running;
		sds->min_load_per_task = sgs->sum_weighted_load /
					 sgs->sum_nr_running;
	}

	/*
	 * Calculate the group which is almost at its capacity
	 * but still has some room to pick up some load from other
	 * groups and save more power
	 */
	if (sgs->sum_nr_running + 1 > sgs->group_capacity)
		return;

	if (sgs->sum_nr_running > sds->leader_nr_running ||
	    (sgs->sum_nr_running == sds->leader_nr_running &&
	     group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
		sds->group_leader = group;
		sds->leader_nr_running = sgs->sum_nr_running;
	}
}

/**
 * check_power_save_busiest_group - see if there is potential for some power-savings balance
 * @sds: Variable containing the statistics of the sched_domain
 *	under consideration.
 * @this_cpu: Cpu at which we're currently performing load-balancing.
 * @imbalance: Variable to store the imbalance.
 *
 * Description:
 * Check if we have potential to perform some power-savings balance.
 * If yes, set the busiest group to be the least loaded group in the
 * sched_domain, so that its CPUs can be put to idle.
 *
 * Returns 1 if there is potential to perform power-savings balance.
 * Else returns 0.
 */
static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
					int this_cpu, unsigned long *imbalance)
{
	if (!sds->power_savings_balance)
		return 0;

	if (sds->this != sds->group_leader ||
	    sds->group_leader == sds->group_min)
		return 0;

	*imbalance = sds->min_load_per_task;
	sds->busiest = sds->group_min;

	return 1;
}
#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
static inline void init_sd_power_savings_stats(struct sched_domain *sd,
	struct sd_lb_stats *sds, enum cpu_idle_type idle)
{
	return;
}

static inline void update_sd_power_savings_stats(struct sched_group *group,
	struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
{
	return;
}

static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
					int this_cpu, unsigned long *imbalance)
{
	return 0;
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return SCHED_POWER_SCALE;
}

unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
	return default_scale_freq_power(sd, cpu);
}

unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
{
	unsigned long weight = sd->span_weight;
	unsigned long smt_gain = sd->smt_gain;

	smt_gain /= weight;

	return smt_gain;
}

unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
{
	return default_scale_smt_power(sd, cpu);
}

unsigned long scale_rt_power(int cpu)
{
	struct rq *rq = cpu_rq(cpu);
	u64 total, available;

	total = sched_avg_period() + (rq->clock - rq->age_stamp);

	if (unlikely(total < rq->rt_avg)) {
		/* Ensures that power won't end up being negative */
		available = 0;
	} else {
		available = total - rq->rt_avg;
	}

	if (unlikely((s64)total < SCHED_POWER_SCALE))
		total = SCHED_POWER_SCALE;

	total >>= SCHED_POWER_SHIFT;

	return div_u64(available, total);
}
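
/*
 * Worked example (assuming SCHED_POWER_SCALE == 1024): if real-time
 * tasks consumed a quarter of the averaging period, available/total is
 * 3/4 and the function returns roughly 768 -- i.e. only 75% of this
 * cpu's power is advertised to the fair-class load balancer.
 */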
static void update_cpu_power(struct sched_domain *sd, int cpu)
{
	unsigned long weight = sd->span_weight;
	unsigned long power = SCHED_POWER_SCALE;
	struct sched_group *sdg = sd->groups;

	if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
		if (sched_feat(ARCH_POWER))
			power *= arch_scale_smt_power(sd, cpu);
		else
			power *= default_scale_smt_power(sd, cpu);

		power >>= SCHED_POWER_SHIFT;
	}

	sdg->sgp->power_orig = power;

	if (sched_feat(ARCH_POWER))
		power *= arch_scale_freq_power(sd, cpu);
	else
		power *= default_scale_freq_power(sd, cpu);

	power >>= SCHED_POWER_SHIFT;

	power *= scale_rt_power(cpu);
	power >>= SCHED_POWER_SHIFT;

	if (!power)
		power = 1;

	cpu_rq(cpu)->cpu_power = power;
	sdg->sgp->power = power;
}

static void update_group_power(struct sched_domain *sd, int cpu)
{
	struct sched_domain *child = sd->child;
	struct sched_group *group, *sdg = sd->groups;
	unsigned long power;

	if (!child) {
		update_cpu_power(sd, cpu);
		return;
	}

	power = 0;

	group = child->groups;
	do {
		power += group->sgp->power;
		group = group->next;
	} while (group != child->groups);

	sdg->sgp->power = power;
}

/*
 * Try and fix up capacity for tiny siblings, this is needed when
 * things like SD_ASYM_PACKING need f_b_g to select another sibling
 * which on its own isn't powerful enough.
 *
 * See update_sd_pick_busiest() and check_asym_packing().
 */
static inline int
fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
{
	/*
	 * Only siblings can have significantly less than SCHED_POWER_SCALE
	 */
	if (!(sd->flags & SD_SHARE_CPUPOWER))
		return 0;

	/*
	 * If ~90% of the cpu_power is still there, we're good.
	 */
	if (group->sgp->power * 32 > group->sgp->power_orig * 29)
		return 1;

	return 0;
}
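
/*
 * Note on the 32/29 check above: it is an integer stand-in for
 * power > power_orig * 29/32 (~90.6%), so a sibling group keeping more
 * than roughly 90% of its original power is still credited with one
 * unit of capacity despite the rounding elsewhere producing zero.
 */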
/**
 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
 * @sd: The sched_domain whose statistics are to be updated.
 * @group: sched_group whose statistics are to be updated.
 * @this_cpu: Cpu for which load balance is currently performed.
 * @idle: Idle status of this_cpu
 * @load_idx: Load index of sched_domain of this_cpu for load calc.
 * @local_group: Does group contain this_cpu.
 * @cpus: Set of cpus considered for load balancing.
 * @balance: Should we balance.
 * @sgs: variable to hold the statistics for this group.
 */
static inline void update_sg_lb_stats(struct sched_domain *sd,
			struct sched_group *group, int this_cpu,
			enum cpu_idle_type idle, int load_idx,
			int local_group, const struct cpumask *cpus,
			int *balance, struct sg_lb_stats *sgs)
{
	unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
	int i;
	unsigned int balance_cpu = -1, first_idle_cpu = 0;
	unsigned long avg_load_per_task = 0;

	if (local_group)
		balance_cpu = group_first_cpu(group);

	/* Tally up the load of all CPUs in the group */
	max_cpu_load = 0;
	min_cpu_load = ~0UL;
	max_nr_running = 0;

	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
		struct rq *rq = cpu_rq(i);

		/* Bias balancing toward cpus of our domain */
		if (local_group) {
			if (idle_cpu(i) && !first_idle_cpu) {
				first_idle_cpu = 1;
				balance_cpu = i;
			}

			load = target_load(i, load_idx);
		} else {
			load = source_load(i, load_idx);
			if (load > max_cpu_load) {
				max_cpu_load = load;
				max_nr_running = rq->nr_running;
			}
			if (min_cpu_load > load)
				min_cpu_load = load;
		}

		sgs->group_load += load;
		sgs->sum_nr_running += rq->nr_running;
		sgs->sum_weighted_load += weighted_cpuload(i);
		if (idle_cpu(i))
			sgs->idle_cpus++;
	}

	/*
	 * The first idle cpu or the first cpu (busiest) in this sched group
	 * is eligible for doing load balancing at this and above
	 * domains. In the newly idle case, we will allow all the cpus
	 * to do the newly idle load balance.
	 */
	if (idle != CPU_NEWLY_IDLE && local_group) {
		if (balance_cpu != this_cpu) {
			*balance = 0;
			return;
		}
		update_group_power(sd, this_cpu);
	}

	/* Adjust by relative CPU power of the group */
	sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;

	/*
	 * Consider the group unbalanced when the imbalance is larger
	 * than the average weight of a task.
	 *
	 * APZ: with cgroup the avg task weight can vary wildly and
	 * might not be a suitable number - should we keep a
	 * normalized nr_running number somewhere that negates
	 * the hierarchy?
	 */
	if (sgs->sum_nr_running)
		avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;

	if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
		sgs->group_imb = 1;

	sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
						SCHED_POWER_SCALE);
	if (!sgs->group_capacity)
		sgs->group_capacity = fix_small_capacity(sd, group);
	sgs->group_weight = group->group_weight;

	if (sgs->group_capacity > sgs->sum_nr_running)
		sgs->group_has_capacity = 1;
}
/**
 * update_sd_pick_busiest - return 1 on busiest group
 * @sd: sched_domain whose statistics are to be checked
 * @sds: sched_domain statistics
 * @sg: sched_group candidate to be checked for being the busiest
 * @sgs: sched_group statistics
 * @this_cpu: the current cpu
 *
 * Determine if @sg is a busier group than the previously selected
 * busiest group.
 */
static bool update_sd_pick_busiest(struct sched_domain *sd,
				   struct sd_lb_stats *sds,
				   struct sched_group *sg,
				   struct sg_lb_stats *sgs,
				   int this_cpu)
{
	if (sgs->avg_load <= sds->max_load)
		return false;

	if (sgs->sum_nr_running > sgs->group_capacity)
		return true;

	if (sgs->group_imb)
		return true;

	/*
	 * ASYM_PACKING needs to move all the work to the lowest
	 * numbered CPUs in the group, therefore mark all groups
	 * higher than ourself as busy.
	 */
	if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
	    this_cpu < group_first_cpu(sg)) {
		if (!sds->busiest)
			return true;

		if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
			return true;
	}

	return false;
}
/**
 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
 * @sd: sched_domain whose statistics are to be updated.
 * @this_cpu: Cpu for which load balance is currently performed.
 * @idle: Idle status of this_cpu
 * @cpus: Set of cpus considered for load balancing.
 * @balance: Should we balance.
 * @sds: variable to hold the statistics for this sched_domain.
 */
static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
			enum cpu_idle_type idle, const struct cpumask *cpus,
			int *balance, struct sd_lb_stats *sds)
{
	struct sched_domain *child = sd->child;
	struct sched_group *sg = sd->groups;
	struct sg_lb_stats sgs;
	int load_idx, prefer_sibling = 0;

	if (child && child->flags & SD_PREFER_SIBLING)
		prefer_sibling = 1;

	init_sd_power_savings_stats(sd, sds, idle);
	load_idx = get_sd_load_idx(sd, idle);

	do {
		int local_group;

		local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
		memset(&sgs, 0, sizeof(sgs));
		update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
				   local_group, cpus, balance, &sgs);

		if (local_group && !(*balance))
			return;

		sds->total_load += sgs.group_load;
		sds->total_pwr += sg->sgp->power;

		/*
		 * In case the child domain prefers tasks go to siblings
		 * first, lower the sg capacity to one so that we'll try
		 * and move all the excess tasks away. We lower the capacity
		 * of a group only if the local group has the capacity to fit
		 * these excess tasks, i.e. nr_running < group_capacity. The
		 * extra check prevents the case where you always pull from the
		 * heaviest group when it is already under-utilized (possible
		 * when a single task with a large weight outweighs the rest
		 * of the tasks on the system).
		 */
		if (prefer_sibling && !local_group && sds->this_has_capacity)
			sgs.group_capacity = min(sgs.group_capacity, 1UL);

		if (local_group) {
			sds->this_load = sgs.avg_load;
			sds->this = sg;
			sds->this_nr_running = sgs.sum_nr_running;
			sds->this_load_per_task = sgs.sum_weighted_load;
			sds->this_has_capacity = sgs.group_has_capacity;
			sds->this_idle_cpus = sgs.idle_cpus;
		} else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
			sds->max_load = sgs.avg_load;
			sds->busiest = sg;
			sds->busiest_nr_running = sgs.sum_nr_running;
			sds->busiest_idle_cpus = sgs.idle_cpus;
			sds->busiest_group_capacity = sgs.group_capacity;
			sds->busiest_load_per_task = sgs.sum_weighted_load;
			sds->busiest_has_capacity = sgs.group_has_capacity;
			sds->busiest_group_weight = sgs.group_weight;
			sds->group_imb = sgs.group_imb;
		}

		update_sd_power_savings_stats(sg, sds, local_group, &sgs);
		sg = sg->next;
	} while (sg != sd->groups);
}
int __weak arch_sd_sibling_asym_packing(void)
{
	return 0*SD_ASYM_PACKING;
}

/**
 * check_asym_packing - Check to see if the group is packed into the
 *			sched domain.
 *
 * This is primarily intended to be used at the sibling level. Some
 * cores like POWER7 prefer to use lower numbered SMT threads. In the
 * case of POWER7, it can move to lower SMT modes only when higher
 * threads are idle. When in lower SMT modes, the threads will
 * perform better since they share less core resources. Hence when we
 * have idle threads, we want them to be the higher ones.
 *
 * This packing function is run on idle threads. It checks to see if
 * the busiest CPU in this domain (core in the P7 case) has a higher
 * CPU number than the packing function is being run on. Here we are
 * assuming a lower CPU number will be equivalent to a lower SMT thread
 * number.
 *
 * Returns 1 when packing is required and a task should be moved to
 * this CPU. The amount of the imbalance is returned in *imbalance.
 *
 * @sd: The sched_domain whose packing is to be checked.
 * @sds: Statistics of the sched_domain which is to be packed
 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
 * @imbalance: returns amount of imbalance due to packing.
 */
static int check_asym_packing(struct sched_domain *sd,
			      struct sd_lb_stats *sds,
			      int this_cpu, unsigned long *imbalance)
{
	int busiest_cpu;

	if (!(sd->flags & SD_ASYM_PACKING))
		return 0;

	if (!sds->busiest)
		return 0;

	busiest_cpu = group_first_cpu(sds->busiest);
	if (this_cpu > busiest_cpu)
		return 0;

	*imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
				       SCHED_POWER_SCALE);

	return 1;
}
/**
 * fix_small_imbalance - Calculate the minor imbalance that exists
 *			amongst the groups of a sched_domain, during
 *			load balancing.
 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
 * @imbalance: Variable to store the imbalance.
 */
static inline void fix_small_imbalance(struct sd_lb_stats *sds,
				int this_cpu, unsigned long *imbalance)
{
	unsigned long tmp, pwr_now = 0, pwr_move = 0;
	unsigned int imbn = 2;
	unsigned long scaled_busy_load_per_task;

	if (sds->this_nr_running) {
		sds->this_load_per_task /= sds->this_nr_running;
		if (sds->busiest_load_per_task >
		    sds->this_load_per_task)
			imbn = 1;
	} else
		sds->this_load_per_task =
			cpu_avg_load_per_task(this_cpu);

	scaled_busy_load_per_task = sds->busiest_load_per_task
					 * SCHED_POWER_SCALE;
	scaled_busy_load_per_task /= sds->busiest->sgp->power;

	if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
	    (scaled_busy_load_per_task * imbn)) {
		*imbalance = sds->busiest_load_per_task;
		return;
	}

	/*
	 * OK, we don't have enough imbalance to justify moving tasks,
	 * however we may be able to increase total CPU power used by
	 * moving them.
	 */

	pwr_now += sds->busiest->sgp->power *
			min(sds->busiest_load_per_task, sds->max_load);
	pwr_now += sds->this->sgp->power *
			min(sds->this_load_per_task, sds->this_load);
	pwr_now /= SCHED_POWER_SCALE;

	/* Amount of load we'd subtract */
	tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
		sds->busiest->sgp->power;
	if (sds->max_load > tmp)
		pwr_move += sds->busiest->sgp->power *
			min(sds->busiest_load_per_task, sds->max_load - tmp);

	/* Amount of load we'd add */
	if (sds->max_load * sds->busiest->sgp->power <
	    sds->busiest_load_per_task * SCHED_POWER_SCALE)
		tmp = (sds->max_load * sds->busiest->sgp->power) /
			sds->this->sgp->power;
	else
		tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
			sds->this->sgp->power;
	pwr_move += sds->this->sgp->power *
			min(sds->this_load_per_task, sds->this_load + tmp);
	pwr_move /= SCHED_POWER_SCALE;

	/* Move if we gain throughput */
	if (pwr_move > pwr_now)
		*imbalance = sds->busiest_load_per_task;
}
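
/*
 * Note on pwr_now/pwr_move above: both are estimates of delivered
 * throughput, summing min(load_per_task, group load) per side, first
 * as things stand and then after hypothetically moving one task off
 * the busiest group. Only if the post-move sum is strictly larger is
 * a one-task imbalance reported.
 */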
/**
 * calculate_imbalance - Calculate the amount of imbalance present within the
 *			 groups of a given sched_domain during load balance.
 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
 * @this_cpu: Cpu for which currently load balance is being performed.
 * @imbalance: The variable to store the imbalance.
 */
static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
		unsigned long *imbalance)
{
	unsigned long max_pull, load_above_capacity = ~0UL;

	sds->busiest_load_per_task /= sds->busiest_nr_running;
	if (sds->group_imb) {
		sds->busiest_load_per_task =
			min(sds->busiest_load_per_task, sds->avg_load);
	}

	/*
	 * In the presence of smp nice balancing, certain scenarios can have
	 * max load less than avg load (as we skip the groups at or below
	 * their cpu_power, while calculating max_load..)
	 */
	if (sds->max_load < sds->avg_load) {
		*imbalance = 0;
		return fix_small_imbalance(sds, this_cpu, imbalance);
	}

	if (!sds->group_imb) {
		/*
		 * Don't want to pull so many tasks that a group would go idle.
		 */
		load_above_capacity = (sds->busiest_nr_running -
					sds->busiest_group_capacity);

		load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);

		load_above_capacity /= sds->busiest->sgp->power;
	}

	/*
	 * We're trying to get all the cpus to the average_load, so we don't
	 * want to push ourselves above the average load, nor do we wish to
	 * reduce the max loaded cpu below the average load. At the same time,
	 * we also don't want to reduce the group load below the group capacity
	 * (so that we can implement power-savings policies etc). Thus we look
	 * for the minimum possible imbalance.
	 * Be careful of negative numbers as they'll appear as very large values
	 * with unsigned longs.
	 */
	max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);

	/* How much load to actually move to equalise the imbalance */
	*imbalance = min(max_pull * sds->busiest->sgp->power,
		(sds->avg_load - sds->this_load) * sds->this->sgp->power)
			/ SCHED_POWER_SCALE;

	/*
	 * if *imbalance is less than the average load per runnable task
	 * there is no guarantee that any tasks will be moved so we'll have
	 * a think about bumping its value to force at least one task to be
	 * moved
	 */
	if (*imbalance < sds->busiest_load_per_task)
		return fix_small_imbalance(sds, this_cpu, imbalance);
}
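
/*
 * Worked example (assuming SCHED_POWER_SCALE == 1024, default cpu
 * power, and load_above_capacity not limiting): with max_load == 2048,
 * avg_load == 1024 and this_load == 512, max_pull is 1024 and the
 * final imbalance is min(1024, 512) == 512 -- pulling more than 512
 * would push the local group past the domain average instead of
 * equalising it.
 */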
  2620. /******* find_busiest_group() helpers end here *********************/
  2621. /**
  2622. * find_busiest_group - Returns the busiest group within the sched_domain
  2623. * if there is an imbalance. If there isn't an imbalance, and
  2624. * the user has opted for power-savings, it returns a group whose
  2625. * CPUs can be put to idle by rebalancing those tasks elsewhere, if
  2626. * such a group exists.
  2627. *
  2628. * Also calculates the amount of weighted load which should be moved
  2629. * to restore balance.
  2630. *
  2631. * @sd: The sched_domain whose busiest group is to be returned.
  2632. * @this_cpu: The cpu for which load balancing is currently being performed.
  2633. * @imbalance: Variable which stores amount of weighted load which should
  2634. * be moved to restore balance/put a group to idle.
  2635. * @idle: The idle status of this_cpu.
  2636. * @cpus: The set of CPUs under consideration for load-balancing.
  2637. * @balance: Pointer to a variable indicating if this_cpu
  2638. * is the appropriate cpu to perform load balancing at this_level.
  2639. *
  2640. * Returns: - the busiest group if imbalance exists.
  2641. * - If no imbalance and user has opted for power-savings balance,
  2642. * return the least loaded group whose CPUs can be
  2643. * put to idle by rebalancing its tasks onto our group.
  2644. */
static struct sched_group *
find_busiest_group(struct sched_domain *sd, int this_cpu,
		   unsigned long *imbalance, enum cpu_idle_type idle,
		   const struct cpumask *cpus, int *balance)
{
	struct sd_lb_stats sds;

	memset(&sds, 0, sizeof(sds));

	/*
	 * Compute the various statistics relevant for load balancing at
	 * this level.
	 */
	update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);

	/*
	 * this_cpu is not the appropriate cpu to perform load balancing at
	 * this level.
	 */
	if (!(*balance))
		goto ret;

	if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
	    check_asym_packing(sd, &sds, this_cpu, imbalance))
		return sds.busiest;

	/* There is no busy sibling group to pull tasks from */
	if (!sds.busiest || sds.busiest_nr_running == 0)
		goto out_balanced;

	sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;

	/*
	 * If the busiest group is imbalanced the below checks don't
	 * work because they assume all things are equal, which typically
	 * isn't true due to cpus_allowed constraints and the like.
	 */
	if (sds.group_imb)
		goto force_balance;

	/* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
	if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
	    !sds.busiest_has_capacity)
		goto force_balance;

	/*
	 * If the local group is more busy than the selected busiest group
	 * don't try and pull any tasks.
	 */
	if (sds.this_load >= sds.max_load)
		goto out_balanced;

	/*
	 * Don't pull any tasks if this group is already above the domain
	 * average load.
	 */
	if (sds.this_load >= sds.avg_load)
		goto out_balanced;

	if (idle == CPU_IDLE) {
		/*
		 * This cpu is idle. If the busiest group doesn't have
		 * more tasks than the number of available CPUs and there
		 * is no imbalance between this and the busiest group with
		 * respect to idle CPUs, it is balanced.
		 */
		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
		    sds.busiest_nr_running <= sds.busiest_group_weight)
			goto out_balanced;
	} else {
		/*
		 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
		 * imbalance_pct to be conservative.
		 */
		if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
			goto out_balanced;
	}

force_balance:
	/* Looks like there is an imbalance. Compute it */
	calculate_imbalance(&sds, this_cpu, imbalance);
	return sds.busiest;

out_balanced:
	/*
	 * There is no obvious imbalance. But check if we can do some balancing
	 * to save power.
	 */
	if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
		return sds.busiest;
ret:
	*imbalance = 0;
	return NULL;
}
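
/*
 * Two illustrative numbers for the checks above (hypothetical values):
 * with sds.total_load = 3072 and sds.total_pwr = 2048 the domain average
 * is avg_load = (1024 * 3072) / 2048 = 1536.  And since imbalance_pct is
 * typically around 125 for non-SMT domains, the non-idle test
 * "100 * max_load <= imbalance_pct * this_load" only lets balancing
 * proceed once the busiest group exceeds the local group's load by
 * roughly 25%.
 */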

/*
 * find_busiest_queue - find the busiest runqueue among the cpus in group.
 */
static struct rq *
find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
		   enum cpu_idle_type idle, unsigned long imbalance,
		   const struct cpumask *cpus)
{
	struct rq *busiest = NULL, *rq;
	unsigned long max_load = 0;
	int i;

	for_each_cpu(i, sched_group_cpus(group)) {
		unsigned long power = power_of(i);
		unsigned long capacity = DIV_ROUND_CLOSEST(power,
							   SCHED_POWER_SCALE);
		unsigned long wl;

		if (!capacity)
			capacity = fix_small_capacity(sd, group);

		if (!cpumask_test_cpu(i, cpus))
			continue;

		rq = cpu_rq(i);
		wl = weighted_cpuload(i);

		/*
		 * When comparing with imbalance, use weighted_cpuload()
		 * which is not scaled with the cpu power.
		 */
		if (capacity && rq->nr_running == 1 && wl > imbalance)
			continue;

		/*
		 * For the load comparisons with the other cpus, consider
		 * the weighted_cpuload() scaled with the cpu power, so that
		 * the load can be moved away from the cpu that is potentially
		 * running at a lower capacity.
		 */
		wl = (wl * SCHED_POWER_SCALE) / power;

		if (wl > max_load) {
			max_load = wl;
			busiest = rq;
		}
	}

	return busiest;
}
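
/*
 * Example of the power scaling above (hypothetical numbers): CPU0 has
 * power 1024 and raw load 1000; CPU1 has power 589 (say, an SMT sibling
 * or a throttled cpu) and raw load 800.  After scaling, CPU0 contributes
 * 1000 * 1024 / 1024 = 1000 while CPU1 contributes 800 * 1024 / 589 ~= 1390,
 * so CPU1 is picked as busiest despite its lower raw load.
 */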

/*
 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
 * so long as it is large enough.
 */
#define MAX_PINNED_INTERVAL	512

/* Working cpumask for load_balance and load_balance_newidle. */
static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);

static int need_active_balance(struct sched_domain *sd, int idle,
			       int busiest_cpu, int this_cpu)
{
	if (idle == CPU_NEWLY_IDLE) {

		/*
		 * ASYM_PACKING needs to force migrate tasks from busy but
		 * higher numbered CPUs in order to pack all tasks in the
		 * lowest numbered CPUs.
		 */
		if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
			return 1;

		/*
		 * The only task running in a non-idle cpu can be moved to this
		 * cpu in an attempt to completely free up the other CPU
		 * package.
		 *
		 * The package power saving logic comes from
		 * find_busiest_group(). If there is no imbalance, then
		 * f_b_g() will return NULL. However when sched_mc={1,2} then
		 * f_b_g() will select a group from which a running task may be
		 * pulled to this cpu in order to make the other package idle.
		 * If there is no opportunity to make a package idle and if
		 * there is no imbalance, then f_b_g() will return NULL and no
		 * action will be taken in load_balance_newidle().
		 *
		 * Under normal task pull operation due to imbalance, there
		 * will be more than one task in the source run queue and
		 * move_tasks() will succeed.  ld_moved will be true and this
		 * active balance code will not be triggered.
		 */
		if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
			return 0;
	}

	return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
}
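
/*
 * Note on the final test: with cache_nice_tries = 1 (hypothetical; the
 * actual value depends on the domain level) the condition
 * "nr_balance_failed > cache_nice_tries + 2" first holds on the fourth
 * consecutive failed balance attempt.  Since newidle balancing does not
 * increment the failure counter (see load_balance() below), only repeated
 * failures of periodic balancing can escalate to an active balance.
 */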

static int active_load_balance_cpu_stop(void *data);

/*
 * Check this_cpu to ensure it is balanced within domain. Attempt to move
 * tasks if there is an imbalance.
 */
static int load_balance(int this_cpu, struct rq *this_rq,
			struct sched_domain *sd, enum cpu_idle_type idle,
			int *balance)
{
	int ld_moved, all_pinned = 0, active_balance = 0;
	struct sched_group *group;
	unsigned long imbalance;
	struct rq *busiest;
	unsigned long flags;
	struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);

	cpumask_copy(cpus, cpu_active_mask);

	schedstat_inc(sd, lb_count[idle]);

redo:
	group = find_busiest_group(sd, this_cpu, &imbalance, idle,
				   cpus, balance);

	if (*balance == 0)
		goto out_balanced;

	if (!group) {
		schedstat_inc(sd, lb_nobusyg[idle]);
		goto out_balanced;
	}

	busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
	if (!busiest) {
		schedstat_inc(sd, lb_nobusyq[idle]);
		goto out_balanced;
	}

	BUG_ON(busiest == this_rq);

	schedstat_add(sd, lb_imbalance[idle], imbalance);

	ld_moved = 0;
	if (busiest->nr_running > 1) {
		/*
		 * Attempt to move tasks. If find_busiest_group has found
		 * an imbalance but busiest->nr_running <= 1, the group is
		 * still unbalanced. ld_moved simply stays zero, so it is
		 * correctly treated as an imbalance.
		 */
		all_pinned = 1;
		local_irq_save(flags);
		double_rq_lock(this_rq, busiest);
		ld_moved = move_tasks(this_rq, this_cpu, busiest,
				      imbalance, sd, idle, &all_pinned);
		double_rq_unlock(this_rq, busiest);
		local_irq_restore(flags);

		/*
		 * some other cpu did the load balance for us.
		 */
		if (ld_moved && this_cpu != smp_processor_id())
			resched_cpu(this_cpu);

		/* All tasks on this runqueue were pinned by CPU affinity */
		if (unlikely(all_pinned)) {
			cpumask_clear_cpu(cpu_of(busiest), cpus);
			if (!cpumask_empty(cpus))
				goto redo;
			goto out_balanced;
		}
	}

	if (!ld_moved) {
		schedstat_inc(sd, lb_failed[idle]);
		/*
		 * Increment the failure counter only on periodic balance.
		 * We do not want newidle balance, which can be very
		 * frequent, to pollute the failure counter, causing
		 * excessive cache_hot migrations and active balances.
		 */
		if (idle != CPU_NEWLY_IDLE)
			sd->nr_balance_failed++;

		if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
			raw_spin_lock_irqsave(&busiest->lock, flags);

			/* don't kick the active_load_balance_cpu_stop
			 * if the curr task on the busiest cpu can't be
			 * moved to this_cpu
			 */
			if (!cpumask_test_cpu(this_cpu,
					      &busiest->curr->cpus_allowed)) {
				raw_spin_unlock_irqrestore(&busiest->lock,
							   flags);
				all_pinned = 1;
				goto out_one_pinned;
			}

			/*
			 * ->active_balance synchronizes accesses to
			 * ->active_balance_work.  Once set, it's cleared
			 * only after active load balance is finished.
			 */
			if (!busiest->active_balance) {
				busiest->active_balance = 1;
				busiest->push_cpu = this_cpu;
				active_balance = 1;
			}
			raw_spin_unlock_irqrestore(&busiest->lock, flags);

			if (active_balance)
				stop_one_cpu_nowait(cpu_of(busiest),
					active_load_balance_cpu_stop, busiest,
					&busiest->active_balance_work);

			/*
			 * We've kicked active balancing, reset the failure
			 * counter.
			 */
			sd->nr_balance_failed = sd->cache_nice_tries+1;
		}
	} else
		sd->nr_balance_failed = 0;

	if (likely(!active_balance)) {
		/* We were unbalanced, so reset the balancing interval */
		sd->balance_interval = sd->min_interval;
	} else {
		/*
		 * If we've begun active balancing, start to back off. This
		 * case may not be covered by the all_pinned logic if there
		 * is only 1 task on the busy runqueue (because we don't call
		 * move_tasks).
		 */
		if (sd->balance_interval < sd->max_interval)
			sd->balance_interval *= 2;
	}

	goto out;

out_balanced:
	schedstat_inc(sd, lb_balanced[idle]);

	sd->nr_balance_failed = 0;

out_one_pinned:
	/* tune up the balancing interval */
	if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
	    (sd->balance_interval < sd->max_interval))
		sd->balance_interval *= 2;

	ld_moved = 0;
out:
	return ld_moved;
}
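
/*
 * Backoff sketch for the interval handling above (hypothetical values):
 * with sd->min_interval = 8 and sd->max_interval = 128 (milliseconds),
 * consecutive passes that move nothing double the interval
 * 8 -> 16 -> 32 -> 64 -> 128 and stop there; if every task was pinned,
 * the doubling may instead continue up to MAX_PINNED_INTERVAL (512).
 * One successful, non-active-balance pass resets it to min_interval.
 */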

/*
 * idle_balance is called by schedule() if this_cpu is about to become
 * idle. Attempts to pull tasks from other CPUs.
 */
static void idle_balance(int this_cpu, struct rq *this_rq)
{
	struct sched_domain *sd;
	int pulled_task = 0;
	unsigned long next_balance = jiffies + HZ;

	this_rq->idle_stamp = this_rq->clock;

	if (this_rq->avg_idle < sysctl_sched_migration_cost)
		return;

	/*
	 * Drop the rq->lock, but keep IRQ/preempt disabled.
	 */
	raw_spin_unlock(&this_rq->lock);

	update_shares(this_cpu);
	rcu_read_lock();
	for_each_domain(this_cpu, sd) {
		unsigned long interval;
		int balance = 1;

		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		if (sd->flags & SD_BALANCE_NEWIDLE) {
			/* If we've pulled tasks over stop searching: */
			pulled_task = load_balance(this_cpu, this_rq,
						   sd, CPU_NEWLY_IDLE, &balance);
		}

		interval = msecs_to_jiffies(sd->balance_interval);
		if (time_after(next_balance, sd->last_balance + interval))
			next_balance = sd->last_balance + interval;
		if (pulled_task) {
			this_rq->idle_stamp = 0;
			break;
		}
	}
	rcu_read_unlock();

	raw_spin_lock(&this_rq->lock);

	if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
		/*
		 * We are going idle. next_balance may be set based on
		 * a busy processor. So reset next_balance.
		 */
		this_rq->next_balance = next_balance;
	}
}

/*
 * active_load_balance_cpu_stop is run by the cpu stopper. It pushes
 * running tasks off the busiest CPU onto idle CPUs. It requires at
 * least 1 task to be running on each physical CPU where possible, and
 * avoids physical / logical imbalances.
 */
static int active_load_balance_cpu_stop(void *data)
{
	struct rq *busiest_rq = data;
	int busiest_cpu = cpu_of(busiest_rq);
	int target_cpu = busiest_rq->push_cpu;
	struct rq *target_rq = cpu_rq(target_cpu);
	struct sched_domain *sd;

	raw_spin_lock_irq(&busiest_rq->lock);

	/* make sure the requested cpu hasn't gone down in the meantime */
	if (unlikely(busiest_cpu != smp_processor_id() ||
		     !busiest_rq->active_balance))
		goto out_unlock;

	/* Is there any task to move? */
	if (busiest_rq->nr_running <= 1)
		goto out_unlock;

	/*
	 * This condition is "impossible"; if it occurs
	 * we need to fix it. Originally reported by
	 * Bjorn Helgaas on a 128-cpu setup.
	 */
	BUG_ON(busiest_rq == target_rq);

	/* move a task from busiest_rq to target_rq */
	double_lock_balance(busiest_rq, target_rq);

	/* Search for an sd spanning us and the target CPU. */
	rcu_read_lock();
	for_each_domain(target_cpu, sd) {
		if ((sd->flags & SD_LOAD_BALANCE) &&
		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
			break;
	}

	if (likely(sd)) {
		schedstat_inc(sd, alb_count);

		if (move_one_task(target_rq, target_cpu, busiest_rq,
				  sd, CPU_IDLE))
			schedstat_inc(sd, alb_pushed);
		else
			schedstat_inc(sd, alb_failed);
	}
	rcu_read_unlock();
	double_unlock_balance(busiest_rq, target_rq);
out_unlock:
	busiest_rq->active_balance = 0;
	raw_spin_unlock_irq(&busiest_rq->lock);
	return 0;
}

#ifdef CONFIG_NO_HZ

static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb);

static void trigger_sched_softirq(void *data)
{
	raise_softirq_irqoff(SCHED_SOFTIRQ);
}

static inline void init_sched_softirq_csd(struct call_single_data *csd)
{
	csd->func = trigger_sched_softirq;
	csd->info = NULL;
	csd->flags = 0;
	csd->priv = 0;
}

/*
 * idle load balancing details
 * - One of the idle CPUs nominates itself as the idle load_balancer, while
 *   entering idle.
 * - This idle load balancer CPU will also go into tickless mode when
 *   it is idle, just like all other idle CPUs.
 * - When one of the busy CPUs notices that idle rebalancing may be
 *   needed, it will kick the idle load balancer, which then does idle
 *   load balancing for all the idle CPUs.
 */
static struct {
	atomic_t load_balancer;
	atomic_t first_pick_cpu;
	atomic_t second_pick_cpu;
	cpumask_var_t idle_cpus_mask;
	cpumask_var_t grp_idle_mask;
	unsigned long next_balance;	/* in jiffy units */
} nohz ____cacheline_aligned;
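
/*
 * Convention used below: each of the three atomics above holds either a
 * valid cpu number or nr_cpu_ids, which serves as the "no cpu" sentinel;
 * ownership is claimed and released with atomic_cmpxchg() against that
 * sentinel (see select_nohz_load_balancer() and nohz_kick_needed()).
 */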

int get_nohz_load_balancer(void)
{
	return atomic_read(&nohz.load_balancer);
}

#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
/**
 * lowest_flag_domain - Return lowest sched_domain containing flag.
 * @cpu:	The cpu whose lowest level of sched domain is to
 *		be returned.
 * @flag:	The flag to check for the lowest sched_domain
 *		for the given cpu.
 *
 * Returns the lowest sched_domain of a cpu which contains the given flag.
 */
static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
{
	struct sched_domain *sd;

	for_each_domain(cpu, sd)
		if (sd && (sd->flags & flag))
			break;

	return sd;
}

/**
 * for_each_flag_domain - Iterates over sched_domains containing the flag.
 * @cpu:	The cpu whose domains we're iterating over.
 * @sd:		variable holding the value of the power_savings_sd
 *		for cpu.
 * @flag:	The flag to filter the sched_domains to be iterated.
 *
 * Iterates over all the scheduler domains for a given cpu that have the
 * 'flag' set, starting from the lowest sched_domain to the highest.
 */
#define for_each_flag_domain(cpu, sd, flag) \
	for (sd = lowest_flag_domain(cpu, flag); \
		(sd && (sd->flags & flag)); sd = sd->parent)
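
/*
 * Usage sketch for the macro above (illustrative only): walk every
 * power-savings domain of a cpu, lowest level first, stopping at the
 * first ancestor that lacks the flag:
 *
 *	struct sched_domain *sd;
 *
 *	for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
 *		// sd->flags is guaranteed to contain the flag here
 *	}
 */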

/**
 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
 * @ilb_group:	group to be checked for semi-idleness
 *
 * Returns:	1 if the group is semi-idle. 0 otherwise.
 *
 * We define a sched_group to be semi idle if it has at least one idle CPU
 * and at least one non-idle CPU. This helper function checks if the given
 * sched_group is semi-idle or not.
 */
static inline int is_semi_idle_group(struct sched_group *ilb_group)
{
	cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
		    sched_group_cpus(ilb_group));

	/*
	 * A sched_group is semi-idle when it has at least one busy cpu
	 * and at least one idle cpu.
	 */
	if (cpumask_empty(nohz.grp_idle_mask))
		return 0;

	if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
		return 0;

	return 1;
}
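
/*
 * Example (hypothetical): for a group spanning cpus {4,5} with only cpu 5
 * in nohz.idle_cpus_mask, the intersection {5} is neither empty nor equal
 * to the full span, so the group is semi-idle and cpu 5 is a good ilb
 * candidate: its package is already powered up by busy cpu 4.
 */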

/**
 * find_new_ilb - Finds the optimum idle load balancer for nomination.
 * @cpu:	The cpu which is nominating a new idle_load_balancer.
 *
 * Returns:	Returns the id of the idle load balancer if it exists,
 *		Else, returns >= nr_cpu_ids.
 *
 * This algorithm picks the idle load balancer such that it belongs to a
 * semi-idle powersavings sched_domain. The idea is to try and avoid
 * completely idle packages/cores just for the purpose of idle load balancing
 * when there are other idle CPUs which are better suited for that job.
 */
static int find_new_ilb(int cpu)
{
	struct sched_domain *sd;
	struct sched_group *ilb_group;
	int ilb = nr_cpu_ids;

	/*
	 * Have idle load balancer selection from semi-idle packages only
	 * when power-aware load balancing is enabled
	 */
	if (!(sched_smt_power_savings || sched_mc_power_savings))
		goto out_done;

	/*
	 * Optimize for the case when we have no idle CPUs or only one
	 * idle CPU. Don't walk the sched_domain hierarchy in such cases
	 */
	if (cpumask_weight(nohz.idle_cpus_mask) < 2)
		goto out_done;

	rcu_read_lock();
	for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
		ilb_group = sd->groups;

		do {
			if (is_semi_idle_group(ilb_group)) {
				ilb = cpumask_first(nohz.grp_idle_mask);
				goto unlock;
			}

			ilb_group = ilb_group->next;
		} while (ilb_group != sd->groups);
	}
unlock:
	rcu_read_unlock();

out_done:
	return ilb;
}
#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
static inline int find_new_ilb(int call_cpu)
{
	return nr_cpu_ids;
}
#endif

/*
 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
 * nohz_load_balancer CPU (if there is one), otherwise fall back to any idle
 * CPU (if there is one).
 */
static void nohz_balancer_kick(int cpu)
{
	int ilb_cpu;

	nohz.next_balance++;

	ilb_cpu = get_nohz_load_balancer();

	if (ilb_cpu >= nr_cpu_ids) {
		ilb_cpu = cpumask_first(nohz.idle_cpus_mask);
		if (ilb_cpu >= nr_cpu_ids)
			return;
	}

	if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
		struct call_single_data *cp;

		cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
		cp = &per_cpu(remote_sched_softirq_cb, cpu);
		__smp_call_function_single(ilb_cpu, cp, 0);
	}
	return;
}

/*
 * This routine will try to nominate the ilb (idle load balancing)
 * owner among the cpus whose ticks are stopped. The ilb owner will do the
 * idle load balancing on behalf of all those cpus.
 *
 * When the ilb owner becomes busy, we will not have a new ilb owner until
 * some idle CPU wakes up and goes back to idle or some busy CPU tries to
 * kick idle load balancing by kicking one of the idle CPUs.
 *
 * Ticks are stopped for the ilb owner as well, with a busy CPU kicking this
 * ilb owner CPU in the future (when there is a need for idle load balancing
 * on behalf of all idle CPUs).
 */
void select_nohz_load_balancer(int stop_tick)
{
	int cpu = smp_processor_id();

	if (stop_tick) {
		if (!cpu_active(cpu)) {
			if (atomic_read(&nohz.load_balancer) != cpu)
				return;

			/*
			 * If we are going offline and still the leader,
			 * give up!
			 */
			if (atomic_cmpxchg(&nohz.load_balancer, cpu,
					   nr_cpu_ids) != cpu)
				BUG();

			return;
		}

		cpumask_set_cpu(cpu, nohz.idle_cpus_mask);

		if (atomic_read(&nohz.first_pick_cpu) == cpu)
			atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids);
		if (atomic_read(&nohz.second_pick_cpu) == cpu)
			atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);

		if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) {
			int new_ilb;

			/* make me the ilb owner */
			if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids,
					   cpu) != nr_cpu_ids)
				return;

			/*
			 * Check to see if there is a more power-efficient
			 * ilb.
			 */
			new_ilb = find_new_ilb(cpu);
			if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
				atomic_set(&nohz.load_balancer, nr_cpu_ids);
				resched_cpu(new_ilb);
				return;
			}
			return;
		}
	} else {
		if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
			return;

		cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);

		if (atomic_read(&nohz.load_balancer) == cpu)
			if (atomic_cmpxchg(&nohz.load_balancer, cpu,
					   nr_cpu_ids) != cpu)
				BUG();
	}
	return;
}
#endif

static DEFINE_SPINLOCK(balancing);

static unsigned long __read_mostly max_load_balance_interval = HZ/10;

/*
 * Scale the max load_balance interval with the number of CPUs in the system.
 * This trades load-balance latency on larger machines for less cross talk.
 */
static void update_max_interval(void)
{
	max_load_balance_interval = HZ*num_online_cpus()/10;
}
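
/*
 * Worked example (HZ is config-dependent; assume HZ = 1000): on a 16-cpu
 * machine update_max_interval() yields 1000 * 16 / 10 = 1600 jiffies
 * (1.6s), versus the static default of HZ/10 = 100 jiffies (0.1s).
 */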

/*
 * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
 *
 * Balancing parameters are set up in arch_init_sched_domains.
 */
static void rebalance_domains(int cpu, enum cpu_idle_type idle)
{
	int balance = 1;
	struct rq *rq = cpu_rq(cpu);
	unsigned long interval;
	struct sched_domain *sd;
	/* Earliest time when we have to do rebalance again */
	unsigned long next_balance = jiffies + 60*HZ;
	int update_next_balance = 0;
	int need_serialize;

	update_shares(cpu);

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (!(sd->flags & SD_LOAD_BALANCE))
			continue;

		interval = sd->balance_interval;
		if (idle != CPU_IDLE)
			interval *= sd->busy_factor;

		/* scale ms to jiffies */
		interval = msecs_to_jiffies(interval);
		interval = clamp(interval, 1UL, max_load_balance_interval);

		need_serialize = sd->flags & SD_SERIALIZE;

		if (need_serialize) {
			if (!spin_trylock(&balancing))
				goto out;
		}

		if (time_after_eq(jiffies, sd->last_balance + interval)) {
			if (load_balance(cpu, rq, sd, idle, &balance)) {
				/*
				 * We've pulled tasks over so we're no
				 * longer idle.
				 */
				idle = CPU_NOT_IDLE;
			}
			sd->last_balance = jiffies;
		}
		if (need_serialize)
			spin_unlock(&balancing);
out:
		if (time_after(next_balance, sd->last_balance + interval)) {
			next_balance = sd->last_balance + interval;
			update_next_balance = 1;
		}

		/*
		 * Stop the load balance at this level. There is another
		 * CPU in our sched group which is doing load balancing more
		 * actively.
		 */
		if (!balance)
			break;
	}
	rcu_read_unlock();

	/*
	 * next_balance will be updated only when there is a need.
	 * When the cpu is attached to a null domain, for example, it will
	 * not be updated.
	 */
	if (likely(update_next_balance))
		rq->next_balance = next_balance;
}

#ifdef CONFIG_NO_HZ
/*
 * In the CONFIG_NO_HZ case, the idle balance kickee will do the
 * rebalancing for all the cpus for whom scheduler ticks are stopped.
 */
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
{
	struct rq *this_rq = cpu_rq(this_cpu);
	struct rq *rq;
	int balance_cpu;

	if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
		return;

	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
		if (balance_cpu == this_cpu)
			continue;

		/*
		 * If this cpu gets work to do, stop the load balancing
		 * work being done for other cpus. The next load
		 * balancing owner will pick it up.
		 */
		if (need_resched()) {
			this_rq->nohz_balance_kick = 0;
			break;
		}

		raw_spin_lock_irq(&this_rq->lock);
		update_rq_clock(this_rq);
		update_cpu_load(this_rq);
		raw_spin_unlock_irq(&this_rq->lock);

		rebalance_domains(balance_cpu, CPU_IDLE);

		rq = cpu_rq(balance_cpu);
		if (time_after(this_rq->next_balance, rq->next_balance))
			this_rq->next_balance = rq->next_balance;
	}
	nohz.next_balance = this_rq->next_balance;
	this_rq->nohz_balance_kick = 0;
}

/*
 * Current heuristic for kicking the idle load balancer
 * - first_pick_cpu is one of the busy CPUs. It will kick the
 *   idle load balancer when it has more than one process active. This
 *   eliminates the need for idle load balancing altogether when we have
 *   only one running process in the system (common case).
 * - If there is more than one busy CPU, the idle load balancer may have
 *   to run for active_load_balance to happen (i.e., two busy CPUs are
 *   SMT or core siblings and can run better if they move to different
 *   physical CPUs). So, second_pick_cpu is the second of the busy CPUs,
 *   which will kick the idle load balancer as soon as it has any load.
 */
static inline int nohz_kick_needed(struct rq *rq, int cpu)
{
	unsigned long now = jiffies;
	int ret;
	int first_pick_cpu, second_pick_cpu;

	if (time_before(now, nohz.next_balance))
		return 0;

	if (rq->idle_at_tick)
		return 0;

	first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
	second_pick_cpu = atomic_read(&nohz.second_pick_cpu);

	if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu &&
	    second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu)
		return 0;

	ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu);
	if (ret == nr_cpu_ids || ret == cpu) {
		atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
		if (rq->nr_running > 1)
			return 1;
	} else {
		ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu);
		if (ret == nr_cpu_ids || ret == cpu) {
			if (rq->nr_running)
				return 1;
		}
	}
	return 0;
}
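
/*
 * Walk-through of the claim protocol above (illustrative): the first busy
 * cpu A to run this finds first_pick_cpu == nr_cpu_ids, so its cmpxchg
 * returns nr_cpu_ids and A owns the first slot; it kicks the ilb only
 * once it has more than one runnable task.  A second busy cpu B sees A in
 * the first slot (the cmpxchg returns A != B), falls through to claim
 * second_pick_cpu, and kicks as soon as it has any load at all.
 */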
#else
static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
#endif

/*
 * run_rebalance_domains is triggered when needed from the scheduler tick.
 * Also triggered for nohz idle balancing (with nohz_balance_kick set).
 */
static void run_rebalance_domains(struct softirq_action *h)
{
	int this_cpu = smp_processor_id();
	struct rq *this_rq = cpu_rq(this_cpu);
	enum cpu_idle_type idle = this_rq->idle_at_tick ?
						CPU_IDLE : CPU_NOT_IDLE;

	rebalance_domains(this_cpu, idle);

	/*
	 * If this cpu has a pending nohz_balance_kick, then do the
	 * balancing on behalf of the other idle cpus whose ticks are
	 * stopped.
	 */
	nohz_idle_balance(this_cpu, idle);
}

static inline int on_null_domain(int cpu)
{
	return !rcu_dereference_sched(cpu_rq(cpu)->sd);
}

/*
 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
 */
static inline void trigger_load_balance(struct rq *rq, int cpu)
{
	/* Don't need to rebalance while attached to NULL domain */
	if (time_after_eq(jiffies, rq->next_balance) &&
	    likely(!on_null_domain(cpu)))
		raise_softirq(SCHED_SOFTIRQ);
#ifdef CONFIG_NO_HZ
	else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
		nohz_balancer_kick(cpu);
#endif
}

static void rq_online_fair(struct rq *rq)
{
	update_sysctl();
}

static void rq_offline_fair(struct rq *rq)
{
	update_sysctl();
}

#else	/* CONFIG_SMP */

/*
 * on UP we do not need to balance between CPUs:
 */
static inline void idle_balance(int cpu, struct rq *rq)
{
}

#endif /* CONFIG_SMP */

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se, queued);
	}
}

/*
 * called on fork with the child task as argument from the parent's context
 *  - child not yet on the tasklist
 *  - preemption disabled
 */
static void task_fork_fair(struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(current);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();
	struct rq *rq = this_rq();
	unsigned long flags;

	raw_spin_lock_irqsave(&rq->lock, flags);

	update_rq_clock(rq);

	if (unlikely(task_cpu(p) != this_cpu)) {
		rcu_read_lock();
		__set_task_cpu(p, this_cpu);
		rcu_read_unlock();
	}

	update_curr(cfs_rq);

	if (curr)
		se->vruntime = curr->vruntime;
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
		resched_task(rq->curr);
	}

	se->vruntime -= cfs_rq->min_vruntime;

	raw_spin_unlock_irqrestore(&rq->lock, flags);
}
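
/*
 * Numeric sketch of the vruntime normalization above (hypothetical
 * values): if the child is placed at se->vruntime = 5000000 on a cfs_rq
 * whose min_vruntime is 4000000, we store the relative value 1000000.
 * When the child is finally enqueued, possibly on another cpu with a
 * completely different min_vruntime, enqueue adds the destination
 * cfs_rq's min_vruntime back, preserving the child's relative position.
 */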

/*
 * Priority of the task has changed. Check to see if we preempt
 * the current task.
 */
static void
prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
{
	if (!p->se.on_rq)
		return;

	/*
	 * Reschedule if we are currently running on this runqueue and
	 * our priority decreased, or if we are not currently running on
	 * this runqueue and our priority is higher than the current's.
	 */
	if (rq->curr == p) {
		if (p->prio > oldprio)
			resched_task(rq->curr);
	} else
		check_preempt_curr(rq, p, 0);
}

static void switched_from_fair(struct rq *rq, struct task_struct *p)
{
	struct sched_entity *se = &p->se;
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	/*
	 * Ensure the task's vruntime is normalized, so that when it is
	 * switched back to the fair class the enqueue_entity(.flags=0) will
	 * do the right thing.
	 *
	 * If it was on_rq, then the dequeue_entity(.flags=0) will already
	 * have normalized the vruntime; if it was !on_rq, then only when
	 * the task is sleeping will it still have non-normalized vruntime.
	 */
	if (!se->on_rq && p->state != TASK_RUNNING) {
		/*
		 * Fix up our vruntime so that the current sleep doesn't
		 * cause an 'unlimited' sleep bonus.
		 */
		place_entity(cfs_rq, se, 0);
		se->vruntime -= cfs_rq->min_vruntime;
	}
}

/*
 * We switched to the sched_fair class.
 */
static void switched_to_fair(struct rq *rq, struct task_struct *p)
{
	if (!p->se.on_rq)
		return;

	/*
	 * We were most likely switched from sched_rt, so
	 * kick off the schedule if running, otherwise just see
	 * if we can still preempt the current task.
	 */
	if (rq->curr == p)
		resched_task(rq->curr);
	else
		check_preempt_curr(rq, p, 0);
}

/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static void task_move_group_fair(struct task_struct *p, int on_rq)
{
	/*
	 * If the task was not on the rq at the time of this cgroup movement
	 * it must have been asleep; sleeping tasks keep their ->vruntime
	 * absolute on their old rq until wakeup (needed for the fair sleeper
	 * bonus in place_entity()).
	 *
	 * If it was on the rq, we've just 'preempted' it, which does convert
	 * ->vruntime to a relative base.
	 *
	 * Make sure both cases convert their relative position when migrating
	 * to another cgroup's rq. This does somewhat interfere with the
	 * fair sleeper stuff for the first placement, but who cares.
	 */
	if (!on_rq)
		p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
	set_task_rq(p, task_cpu(p));
	if (!on_rq)
		p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
}
#endif

static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
{
	struct sched_entity *se = &task->se;
	unsigned int rr_interval = 0;

	/*
	 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
	 * idle runqueue:
	 */
	if (rq->cfs.load.weight)
		rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));

	return rr_interval;
}

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,
	.yield_to_task		= yield_to_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_fair,

	.rq_online		= rq_online_fair,
	.rq_offline		= rq_offline_fair,

	.task_waking		= task_waking_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_fork		= task_fork_fair,

	.prio_changed		= prio_changed_fair,
	.switched_from		= switched_from_fair,
	.switched_to		= switched_to_fair,

	.get_rr_interval	= get_rr_interval_fair,

#ifdef CONFIG_FAIR_GROUP_SCHED
	.task_move_group	= task_move_group_fair,
#endif
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

	rcu_read_lock();
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
	rcu_read_unlock();
}
#endif