/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <asm/tlb.h>
#include <asm/unistd.h>

/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
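/*
 * Example (assuming MAX_RT_PRIO == 100 and MAX_PRIO == 140, the usual
 * values from <linux/sched.h> of this era -- they are not defined here):
 * NICE_TO_PRIO(-20) == 100, NICE_TO_PRIO(0) == 120, NICE_TO_PRIO(19) == 139,
 * i.e. the static priorities occupy [100..139], directly above the RT range.
 */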
/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))

/*
 * Some helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME) ((TIME) / (1000000000 / HZ))
#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
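/*
 * Example (HZ is an assumption here; with HZ == 1000 one jiffy is
 * 1000000 ns): NS_TO_JIFFIES(2000000) == 2 and JIFFIES_TO_NS(2) == 2000000,
 * so sleep_avg, which is kept in nanoseconds, converts to jiffies and back.
 */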
/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
 * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 * Timeslices get refilled after they expire.
 */
#define MIN_TIMESLICE max(5 * HZ / 1000, 1)
#define DEF_TIMESLICE (100 * HZ / 1000)
#define ON_RUNQUEUE_WEIGHT 30
#define CHILD_PENALTY 95
#define PARENT_PENALTY 100
#define EXIT_WEIGHT 3
#define PRIO_BONUS_RATIO 25
#define MAX_BONUS (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
#define INTERACTIVE_DELTA 2
#define MAX_SLEEP_AVG (DEF_TIMESLICE * MAX_BONUS)
#define STARVATION_LIMIT (MAX_SLEEP_AVG)
#define NS_MAX_SLEEP_AVG (JIFFIES_TO_NS(MAX_SLEEP_AVG))
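/*
 * Derived values (assuming MAX_USER_PRIO == 40 and HZ == 1000):
 * MAX_BONUS == 40 * 25 / 100 == 10 and MAX_SLEEP_AVG == 100ms * 10 == 1000
 * jiffies, so a task's sleep credit saturates at roughly one second
 * (NS_MAX_SLEEP_AVG == 1,000,000,000 ns).
 */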
/*
 * If a task is 'interactive' then we reinsert it in the active
 * array after it has expired its current timeslice. (it will not
 * continue to run immediately, it will still roundrobin with
 * other interactive tasks.)
 *
 * This part scales the interactivity limit depending on niceness.
 *
 * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
 * Here are a few examples of different nice levels:
 *
 *  TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
 *  TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
 *  TASK_INTERACTIVE(  0): [1,1,1,1,0,0,0,0,0,0,0]
 *  TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
 *  TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
 *
 * (the X axis represents the possible -5 ... 0 ... +5 dynamic
 *  priority range a task can explore, a value of '1' means the
 *  task is rated interactive.)
 *
 * Ie. nice +19 tasks can never get 'interactive' enough to be
 * reinserted into the active array. And only heavily CPU-hog nice -20
 * tasks will be expired. Default nice 0 tasks are somewhere between,
 * it takes some effort for them to get interactive, but it's not
 * too hard.
 */
#define CURRENT_BONUS(p) \
        (NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
                MAX_SLEEP_AVG)

#define GRANULARITY (10 * HZ / 1000 ? : 1)

#ifdef CONFIG_SMP
#define TIMESLICE_GRANULARITY(p) (GRANULARITY * \
                (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
                num_online_cpus())
#else
#define TIMESLICE_GRANULARITY(p) (GRANULARITY * \
                (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
#endif

#define SCALE(v1,v1_max,v2_max) \
        (v1) * (v2_max) / (v1_max)

#define DELTA(p) \
        (SCALE(TASK_NICE(p) + 20, 40, MAX_BONUS) - 20 * MAX_BONUS / 40 + \
                INTERACTIVE_DELTA)

#define TASK_INTERACTIVE(p) \
        ((p)->prio <= (p)->static_prio - DELTA(p))
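/*
 * Worked example (assuming MAX_BONUS == 10 and a nice-0 static_prio of 120):
 * DELTA(p) == (0 + 20) * 10 / 40 - 5 + 2 == 2, so a nice-0 task counts as
 * interactive once its dynamic prio is <= 118, i.e. once CURRENT_BONUS(p)
 * reaches 7 out of 10 -- matching the TASK_INTERACTIVE(0) row above.
 */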
#define INTERACTIVE_SLEEP(p) \
        (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
                (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))

#define TASK_PREEMPTS_CURR(p, rq) \
        ((p)->prio < (rq)->curr->prio)

#define SCALE_PRIO(x, prio) \
        max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)

static unsigned int static_prio_timeslice(int static_prio)
{
        if (static_prio < NICE_TO_PRIO(0))
                return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
        else
                return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}
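/*
 * Worked example (assuming MAX_PRIO == 140 and MAX_USER_PRIO == 40):
 *   nice -20 (prio 100): 4 * DEF_TIMESLICE * 40 / 20 -> 800 msecs
 *   nice   0 (prio 120):     DEF_TIMESLICE * 20 / 20 -> 100 msecs
 *   nice +19 (prio 139):     DEF_TIMESLICE *  1 / 20 ->   5 msecs (MIN_TIMESLICE)
 * which is exactly the [800ms ... 100ms ... 5ms] range described below.
 */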
/*
 * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
 * to time slice values: [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_TIMESLICE worth of execution time.
 */
static inline unsigned int task_timeslice(struct task_struct *p)
{
        return static_prio_timeslice(p->static_prio);
}

/*
 * These are the runqueue data structures:
 */
struct prio_array {
        unsigned int nr_active;
        DECLARE_BITMAP(bitmap, MAX_PRIO+1); /* include 1 bit for delimiter */
        struct list_head queue[MAX_PRIO];
};
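/*
 * One list per priority level plus a bitmap of non-empty levels: picking
 * the next task is a find-first-set-bit scan over the bitmap followed by
 * taking the head of that list, which is what makes task selection O(1).
 * (The actual lookup lives further down the file, in schedule(), via
 * sched_find_first_bit().)
 */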
/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code), lock
 * acquire operations must be ordered by ascending &runqueue.
 */
struct rq {
        spinlock_t lock;

        /*
         * nr_running and cpu_load should be in the same cacheline because
         * remote CPUs use both these fields when doing load calculation.
         */
        unsigned long nr_running;
        unsigned long raw_weighted_load;
#ifdef CONFIG_SMP
        unsigned long cpu_load[3];
#endif
        unsigned long long nr_switches;

        /*
         * This is part of a global counter where only the total sum
         * over all CPUs matters. A task can increase this counter on
         * one CPU and if it got migrated afterwards it may decrease
         * it on another CPU. Always updated under the runqueue lock:
         */
        unsigned long nr_uninterruptible;

        unsigned long expired_timestamp;
        /* Cached timestamp set by update_cpu_clock() */
        unsigned long long most_recent_timestamp;
        struct task_struct *curr, *idle;
        unsigned long next_balance;
        struct mm_struct *prev_mm;
        struct prio_array *active, *expired, arrays[2];
        int best_expired_prio;
        atomic_t nr_iowait;

#ifdef CONFIG_SMP
        struct sched_domain *sd;

        /* For active balancing */
        int active_balance;
        int push_cpu;
        int cpu;                /* cpu of this runqueue */

        struct task_struct *migration_thread;
        struct list_head migration_queue;
#endif

#ifdef CONFIG_SCHEDSTATS
        /* latency stats */
        struct sched_info rq_sched_info;

        /* sys_sched_yield() stats */
        unsigned long yld_exp_empty;
        unsigned long yld_act_empty;
        unsigned long yld_both_empty;
        unsigned long yld_cnt;

        /* schedule() stats */
        unsigned long sched_switch;
        unsigned long sched_cnt;
        unsigned long sched_goidle;

        /* try_to_wake_up() stats */
        unsigned long ttwu_cnt;
        unsigned long ttwu_local;
#endif
        struct lock_class_key rq_lock_key;
};

static DEFINE_PER_CPU(struct rq, runqueues);

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
        return rq->cpu;
#else
        return 0;
#endif
}

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
        for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() (&__get_cpu_var(runqueues))
#define task_rq(p) cpu_rq(task_cpu(p))
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)

#ifndef prepare_arch_switch
# define prepare_arch_switch(next) do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev) do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
        return rq->curr == p;
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
        rq->lock.owner = current;
#endif
        /*
         * If we are tracking spinlock dependencies then we have to
         * fix up the runqueue lock - which gets 'carried over' from
         * prev into current:
         */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

        spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
        return p->oncpu;
#else
        return rq->curr == p;
#endif
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
        /*
         * We can optimise this out completely for !SMP, because the
         * SMP rebalancing from interrupt is the only thing that cares
         * here.
         */
        next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        spin_unlock_irq(&rq->lock);
#else
        spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
        /*
         * After ->oncpu is cleared, the task can be moved to a different CPU.
         * We must ensure this doesn't happen until the switch is completely
         * finished.
         */
        smp_wmb();
        prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */
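/*
 * Summary of the two schemes above: by default the runqueue lock is held
 * across the context switch and rq->curr identifies the running task.
 * With __ARCH_WANT_UNLOCKED_CTXSW the lock is dropped in
 * prepare_lock_switch() before the switch happens, so p->oncpu is used
 * instead to track whether a task is still executing on a CPU.
 */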
/*
 * __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called interrupts disabled.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
        __acquires(rq->lock)
{
        struct rq *rq;

repeat_lock_task:
        rq = task_rq(p);
        spin_lock(&rq->lock);
        if (unlikely(rq != task_rq(p))) {
                spin_unlock(&rq->lock);
                goto repeat_lock_task;
        }
        return rq;
}
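/*
 * The retry loop is needed because task_rq(p) is read without any lock
 * held: the task may be migrated to another CPU between reading its
 * runqueue and acquiring that runqueue's lock, so the lookup is re-checked
 * once the lock is held (same pattern in task_rq_lock() below).
 */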
/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts. Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        __acquires(rq->lock)
{
        struct rq *rq;

repeat_lock_task:
        local_irq_save(*flags);
        rq = task_rq(p);
        spin_lock(&rq->lock);
        if (unlikely(rq != task_rq(p))) {
                spin_unlock_irqrestore(&rq->lock, *flags);
                goto repeat_lock_task;
        }
        return rq;
}

static inline void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
{
        spin_unlock(&rq->lock);
}

static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
        __releases(rq->lock)
{
        spin_unlock_irqrestore(&rq->lock, *flags);
}

#ifdef CONFIG_SCHEDSTATS
/*
 * bump this up when changing the output format or the meaning of an existing
 * format, so that tools can adapt (or abort)
 */
#define SCHEDSTAT_VERSION 14

static int show_schedstat(struct seq_file *seq, void *v)
{
        int cpu;

        seq_printf(seq, "version %d\n", SCHEDSTAT_VERSION);
        seq_printf(seq, "timestamp %lu\n", jiffies);
        for_each_online_cpu(cpu) {
                struct rq *rq = cpu_rq(cpu);
#ifdef CONFIG_SMP
                struct sched_domain *sd;
                int dcnt = 0;
#endif

                /* runqueue-specific stats */
                seq_printf(seq,
                    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu",
                    cpu, rq->yld_both_empty,
                    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_cnt,
                    rq->sched_switch, rq->sched_cnt, rq->sched_goidle,
                    rq->ttwu_cnt, rq->ttwu_local,
                    rq->rq_sched_info.cpu_time,
                    rq->rq_sched_info.run_delay, rq->rq_sched_info.pcnt);

                seq_printf(seq, "\n");

#ifdef CONFIG_SMP
                /* domain-specific stats */
                preempt_disable();
                for_each_domain(cpu, sd) {
                        enum idle_type itype;
                        char mask_str[NR_CPUS];

                        cpumask_scnprintf(mask_str, NR_CPUS, sd->span);
                        seq_printf(seq, "domain%d %s", dcnt++, mask_str);
                        for (itype = SCHED_IDLE; itype < MAX_IDLE_TYPES;
                                        itype++) {
                                seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu",
                                                sd->lb_cnt[itype],
                                                sd->lb_balanced[itype],
                                                sd->lb_failed[itype],
                                                sd->lb_imbalance[itype],
                                                sd->lb_gained[itype],
                                                sd->lb_hot_gained[itype],
                                                sd->lb_nobusyq[itype],
                                                sd->lb_nobusyg[itype]);
                        }
                        seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu %lu\n",
                                sd->alb_cnt, sd->alb_failed, sd->alb_pushed,
                                sd->sbe_cnt, sd->sbe_balanced, sd->sbe_pushed,
                                sd->sbf_cnt, sd->sbf_balanced, sd->sbf_pushed,
                                sd->ttwu_wake_remote, sd->ttwu_move_affine, sd->ttwu_move_balance);
                }
                preempt_enable();
#endif
        }
        return 0;
}

static int schedstat_open(struct inode *inode, struct file *file)
{
        unsigned int size = PAGE_SIZE * (1 + num_online_cpus() / 32);
        char *buf = kmalloc(size, GFP_KERNEL);
        struct seq_file *m;
        int res;

        if (!buf)
                return -ENOMEM;
        res = single_open(file, show_schedstat, NULL);
        if (!res) {
                m = file->private_data;
                m->buf = buf;
                m->size = size;
        } else
                kfree(buf);
        return res;
}

const struct file_operations proc_schedstat_operations = {
        .open    = schedstat_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
{
        if (rq) {
                rq->rq_sched_info.run_delay += delta_jiffies;
                rq->rq_sched_info.pcnt++;
        }
}

/*
 * Expects runqueue lock to be held for atomicity of update
 */
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
{
        if (rq)
                rq->rq_sched_info.cpu_time += delta_jiffies;
}
# define schedstat_inc(rq, field) do { (rq)->field++; } while (0)
# define schedstat_add(rq, field, amt) do { (rq)->field += (amt); } while (0)
#else /* !CONFIG_SCHEDSTATS */
static inline void
rq_sched_info_arrive(struct rq *rq, unsigned long delta_jiffies)
{}
static inline void
rq_sched_info_depart(struct rq *rq, unsigned long delta_jiffies)
{}
# define schedstat_inc(rq, field) do { } while (0)
# define schedstat_add(rq, field, amt) do { } while (0)
#endif
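/*
 * Usage note: callers sprinkle e.g. schedstat_inc(rq, yld_cnt) or
 * schedstat_add(rq, field, amt) at the points they want counted; with
 * CONFIG_SCHEDSTATS disabled both macros expand to empty statements, so
 * the accounting costs nothing in that configuration.
 */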
/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static inline struct rq *this_rq_lock(void)
        __acquires(rq->lock)
{
        struct rq *rq;

        local_irq_disable();
        rq = this_rq();
        spin_lock(&rq->lock);

        return rq;
}

#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
/*
 * Called when a process is dequeued from the active array and given
 * the cpu. We should note that with the exception of interactive
 * tasks, the expired queue will become the active queue after the active
 * queue is empty, without explicitly dequeuing and requeuing tasks in the
 * expired queue. (Interactive tasks may be requeued directly to the
 * active queue, thus delaying tasks in the expired queue from running;
 * see scheduler_tick()).
 *
 * This function is only called from sched_info_arrive(), rather than
 * dequeue_task(). Even though a task may be queued and dequeued multiple
 * times as it is shuffled about, we're really interested in knowing how
 * long it was from the *first* time it was queued to the time that it
 * finally hit a cpu.
 */
static inline void sched_info_dequeued(struct task_struct *t)
{
        t->sched_info.last_queued = 0;
}

/*
 * Called when a task finally hits the cpu. We can now calculate how
 * long it was waiting to run. We also note when it began so that we
 * can keep stats on how long its timeslice is.
 */
static void sched_info_arrive(struct task_struct *t)
{
        unsigned long now = jiffies, delta_jiffies = 0;

        if (t->sched_info.last_queued)
                delta_jiffies = now - t->sched_info.last_queued;
        sched_info_dequeued(t);
        t->sched_info.run_delay += delta_jiffies;
        t->sched_info.last_arrival = now;
        t->sched_info.pcnt++;

        rq_sched_info_arrive(task_rq(t), delta_jiffies);
}

/*
 * Called when a process is queued into either the active or expired
 * array. The time is noted and later used to determine how long we
 * had to wait for us to reach the cpu. Since the expired queue will
 * become the active queue after active queue is empty, without dequeuing
 * and requeuing any tasks, we are interested in queuing to either. It
 * is unusual but not impossible for tasks to be dequeued and immediately
 * requeued in the same or another array: this can happen in sched_yield(),
 * set_user_nice(), and even load_balance() as it moves tasks from runqueue
 * to runqueue.
 *
 * This function is only called from enqueue_task(), but also only updates
 * the timestamp if it is already not set. It's assumed that
 * sched_info_dequeued() will clear that stamp when appropriate.
 */
static inline void sched_info_queued(struct task_struct *t)
{
        if (unlikely(sched_info_on()))
                if (!t->sched_info.last_queued)
                        t->sched_info.last_queued = jiffies;
}

/*
 * Called when a process ceases being the active-running process, either
 * voluntarily or involuntarily. Now we can calculate how long we ran.
 */
static inline void sched_info_depart(struct task_struct *t)
{
        unsigned long delta_jiffies = jiffies - t->sched_info.last_arrival;

        t->sched_info.cpu_time += delta_jiffies;
        rq_sched_info_depart(task_rq(t), delta_jiffies);
}

/*
 * Called when tasks are switched involuntarily due, typically, to expiring
 * their time slice. (This may also be called when switching to or from
 * the idle task.) We are only called when prev != next.
 */
static inline void
__sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
        struct rq *rq = task_rq(prev);

        /*
         * prev now departs the cpu. It's not interesting to record
         * stats about how efficient we were at scheduling the idle
         * process, however.
         */
        if (prev != rq->idle)
                sched_info_depart(prev);

        if (next != rq->idle)
                sched_info_arrive(next);
}

static inline void
sched_info_switch(struct task_struct *prev, struct task_struct *next)
{
        if (unlikely(sched_info_on()))
                __sched_info_switch(prev, next);
}
#else
#define sched_info_queued(t) do { } while (0)
#define sched_info_switch(t, next) do { } while (0)
#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */

/*
 * Adding/removing a task to/from a priority array:
 */
static void dequeue_task(struct task_struct *p, struct prio_array *array)
{
        array->nr_active--;
        list_del(&p->run_list);
        if (list_empty(array->queue + p->prio))
                __clear_bit(p->prio, array->bitmap);
}

static void enqueue_task(struct task_struct *p, struct prio_array *array)
{
        sched_info_queued(p);
        list_add_tail(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
        array->nr_active++;
        p->array = array;
}

/*
 * Put task to the end of the run list without the overhead of dequeue
 * followed by enqueue.
 */
static void requeue_task(struct task_struct *p, struct prio_array *array)
{
        list_move_tail(&p->run_list, array->queue + p->prio);
}

static inline void
enqueue_task_head(struct task_struct *p, struct prio_array *array)
{
        list_add(&p->run_list, array->queue + p->prio);
        __set_bit(p->prio, array->bitmap);
        array->nr_active++;
        p->array = array;
}
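/*
 * Note on the bitmap bookkeeping above: enqueue_task() appends to the tail
 * of the per-priority list and unconditionally sets that priority's bitmap
 * bit, while dequeue_task() clears the bit only once the list becomes
 * empty, so the bitmap always mirrors exactly the non-empty queues.
 * enqueue_task_head() is the head-insertion variant, used below by
 * __activate_idle_task().
 */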
/*
 * __normal_prio - return the priority that is based on the static
 * priority but is modified by bonuses/penalties.
 *
 * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
 * into the -5 ... 0 ... +5 bonus/penalty range.
 *
 * We use 25% of the full 0...39 priority range so that:
 *
 * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
 * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
 *
 * Both properties are important to certain workloads.
 */
static inline int __normal_prio(struct task_struct *p)
{
        int bonus, prio;

        bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;

        prio = p->static_prio - bonus;
        if (prio < MAX_RT_PRIO)
                prio = MAX_RT_PRIO;
        if (prio > MAX_PRIO-1)
                prio = MAX_PRIO-1;
        return prio;
}
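/*
 * Example (assuming MAX_BONUS == 10 and a nice-0 static_prio of 120):
 * CURRENT_BONUS(p) ranges 0..10, so bonus ranges -5..+5 and the dynamic
 * priority lands somewhere in 115..125, clamped to stay out of the RT
 * range and below MAX_PRIO.
 */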
/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

/*
 * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
 * If static_prio_timeslice() is ever changed to break this assumption then
 * this code will need modification
 */
#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
#define LOAD_WEIGHT(lp) \
        (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
#define PRIO_TO_LOAD_WEIGHT(prio) \
        LOAD_WEIGHT(static_prio_timeslice(prio))
#define RTPRIO_TO_LOAD_WEIGHT(rp) \
        (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
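/*
 * Example (SCHED_LOAD_SCALE assumed to be 128 as in <linux/sched.h> of this
 * era, HZ == 1000 assumed for the jiffy math): a nice-0 task weighs exactly
 * SCHED_LOAD_SCALE (100ms/100ms * 128 == 128), a nice -20 task about 1024
 * (800ms slice), a nice +19 task about 6 (5ms slice), and an RT task always
 * weighs more than any nice -20 task since PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO)
 * is its baseline.
 */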
  675. static void set_load_weight(struct task_struct *p)
  676. {
  677. if (has_rt_policy(p)) {
  678. #ifdef CONFIG_SMP
  679. if (p == task_rq(p)->migration_thread)
  680. /*
  681. * The migration thread does the actual balancing.
  682. * Giving its load any weight will skew balancing
  683. * adversely.
  684. */
  685. p->load_weight = 0;
  686. else
  687. #endif
  688. p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
  689. } else
  690. p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
  691. }
  692. static inline void
  693. inc_raw_weighted_load(struct rq *rq, const struct task_struct *p)
  694. {
  695. rq->raw_weighted_load += p->load_weight;
  696. }
  697. static inline void
  698. dec_raw_weighted_load(struct rq *rq, const struct task_struct *p)
  699. {
  700. rq->raw_weighted_load -= p->load_weight;
  701. }
  702. static inline void inc_nr_running(struct task_struct *p, struct rq *rq)
  703. {
  704. rq->nr_running++;
  705. inc_raw_weighted_load(rq, p);
  706. }
  707. static inline void dec_nr_running(struct task_struct *p, struct rq *rq)
  708. {
  709. rq->nr_running--;
  710. dec_raw_weighted_load(rq, p);
  711. }
  712. /*
  713. * Calculate the expected normal priority: i.e. priority
  714. * without taking RT-inheritance into account. Might be
  715. * boosted by interactivity modifiers. Changes upon fork,
  716. * setprio syscalls, and whenever the interactivity
  717. * estimator recalculates.
  718. */
  719. static inline int normal_prio(struct task_struct *p)
  720. {
  721. int prio;
  722. if (has_rt_policy(p))
  723. prio = MAX_RT_PRIO-1 - p->rt_priority;
  724. else
  725. prio = __normal_prio(p);
  726. return prio;
  727. }
  728. /*
  729. * Calculate the current priority, i.e. the priority
  730. * taken into account by the scheduler. This value might
  731. * be boosted by RT tasks, or might be boosted by
  732. * interactivity modifiers. Will be RT if the task got
  733. * RT-boosted. If not then it returns p->normal_prio.
  734. */
  735. static int effective_prio(struct task_struct *p)
  736. {
  737. p->normal_prio = normal_prio(p);
  738. /*
  739. * If we are RT tasks or we were boosted to RT priority,
  740. * keep the priority unchanged. Otherwise, update priority
  741. * to the normal priority:
  742. */
  743. if (!rt_prio(p->prio))
  744. return p->normal_prio;
  745. return p->prio;
  746. }
  747. /*
  748. * __activate_task - move a task to the runqueue.
  749. */
  750. static void __activate_task(struct task_struct *p, struct rq *rq)
  751. {
  752. struct prio_array *target = rq->active;
  753. if (batch_task(p))
  754. target = rq->expired;
  755. enqueue_task(p, target);
  756. inc_nr_running(p, rq);
  757. }
  758. /*
  759. * __activate_idle_task - move idle task to the _front_ of runqueue.
  760. */
  761. static inline void __activate_idle_task(struct task_struct *p, struct rq *rq)
  762. {
  763. enqueue_task_head(p, rq->active);
  764. inc_nr_running(p, rq);
  765. }
  766. /*
  767. * Recalculate p->normal_prio and p->prio after having slept,
  768. * updating the sleep-average too:
  769. */
  770. static int recalc_task_prio(struct task_struct *p, unsigned long long now)
  771. {
  772. /* Caller must always ensure 'now >= p->timestamp' */
  773. unsigned long sleep_time = now - p->timestamp;
  774. if (batch_task(p))
  775. sleep_time = 0;
  776. if (likely(sleep_time > 0)) {
  777. /*
  778. * This ceiling is set to the lowest priority that would allow
  779. * a task to be reinserted into the active array on timeslice
  780. * completion.
  781. */
  782. unsigned long ceiling = INTERACTIVE_SLEEP(p);
  783. if (p->mm && sleep_time > ceiling && p->sleep_avg < ceiling) {
  784. /*
  785. * Prevents user tasks from achieving best priority
  786. * with one single large enough sleep.
  787. */
  788. p->sleep_avg = ceiling;
  789. /*
  790. * Using INTERACTIVE_SLEEP() as a ceiling places a
  791. * nice(0) task 1ms sleep away from promotion, and
  792. * gives it 700ms to round-robin with no chance of
  793. * being demoted. This is more than generous, so
  794. * mark this sleep as non-interactive to prevent the
  795. * on-runqueue bonus logic from intervening should
  796. * this task not receive cpu immediately.
  797. */
  798. p->sleep_type = SLEEP_NONINTERACTIVE;
  799. } else {
  800. /*
  801. * Tasks waking from uninterruptible sleep are
  802. * limited in their sleep_avg rise as they
  803. * are likely to be waiting on I/O
  804. */
  805. if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
  806. if (p->sleep_avg >= ceiling)
  807. sleep_time = 0;
  808. else if (p->sleep_avg + sleep_time >=
  809. ceiling) {
  810. p->sleep_avg = ceiling;
  811. sleep_time = 0;
  812. }
  813. }
  814. /*
  815. * This code gives a bonus to interactive tasks.
  816. *
  817. * The boost works by updating the 'average sleep time'
  818. * value here, based on ->timestamp. The more time a
  819. * task spends sleeping, the higher the average gets -
  820. * and the higher the priority boost gets as well.
  821. */
  822. p->sleep_avg += sleep_time;
  823. }
  824. if (p->sleep_avg > NS_MAX_SLEEP_AVG)
  825. p->sleep_avg = NS_MAX_SLEEP_AVG;
  826. }
  827. return effective_prio(p);
  828. }
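/*
 * A minimal, stand-alone sketch of the clamping above, with the kernel
 * types and the INTERACTIVE_SLEEP()/NS_MAX_SLEEP_AVG values replaced by
 * plain parameters so the two limits can be followed in isolation (the
 * p->mm and SLEEP_NONINTERACTIVE handling is left out):
 */
#if 0
static unsigned long sleep_avg_sketch(unsigned long sleep_avg,
				      unsigned long sleep_time,
				      unsigned long ceiling,
				      unsigned long max_avg)
{
	if (sleep_time > ceiling && sleep_avg < ceiling)
		/* one single large sleep cannot reach best priority */
		sleep_avg = ceiling;
	else
		/* otherwise the sleep time is credited in full */
		sleep_avg += sleep_time;

	/* and the result is always capped, like NS_MAX_SLEEP_AVG above */
	if (sleep_avg > max_avg)
		sleep_avg = max_avg;
	return sleep_avg;
}
#endif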
  829. /*
  830. * activate_task - move a task to the runqueue and do priority recalculation
  831. *
  832. * Update all the scheduling statistics stuff. (sleep average
  833. * calculation, priority modifiers, etc.)
  834. */
  835. static void activate_task(struct task_struct *p, struct rq *rq, int local)
  836. {
  837. unsigned long long now;
  838. if (rt_task(p))
  839. goto out;
  840. now = sched_clock();
  841. #ifdef CONFIG_SMP
  842. if (!local) {
  843. /* Compensate for drifting sched_clock */
  844. struct rq *this_rq = this_rq();
  845. now = (now - this_rq->most_recent_timestamp)
  846. + rq->most_recent_timestamp;
  847. }
  848. #endif
  849. /*
  850. * Sleep time is in units of nanosecs, so shift by 20 to get a
  851. * milliseconds-range estimation of the amount of time that the task
  852. * spent sleeping:
  853. */
  854. if (unlikely(prof_on == SLEEP_PROFILING)) {
  855. if (p->state == TASK_UNINTERRUPTIBLE)
  856. profile_hits(SLEEP_PROFILING, (void *)get_wchan(p),
  857. (now - p->timestamp) >> 20);
  858. }
  859. p->prio = recalc_task_prio(p, now);
  860. /*
  861. * This checks to make sure it's not an uninterruptible task
  862. * that is now waking up.
  863. */
  864. if (p->sleep_type == SLEEP_NORMAL) {
  865. /*
866. * Tasks which were woken up by interrupts (i.e. hw events)
  867. * are most likely of interactive nature. So we give them
  868. * the credit of extending their sleep time to the period
  869. * of time they spend on the runqueue, waiting for execution
  870. * on a CPU, first time around:
  871. */
  872. if (in_interrupt())
  873. p->sleep_type = SLEEP_INTERRUPTED;
  874. else {
  875. /*
  876. * Normal first-time wakeups get a credit too for
  877. * on-runqueue time, but it will be weighted down:
  878. */
  879. p->sleep_type = SLEEP_INTERACTIVE;
  880. }
  881. }
  882. p->timestamp = now;
  883. out:
  884. __activate_task(p, rq);
  885. }
  886. /*
  887. * deactivate_task - remove a task from the runqueue.
  888. */
  889. static void deactivate_task(struct task_struct *p, struct rq *rq)
  890. {
  891. dec_nr_running(p, rq);
  892. dequeue_task(p, p->array);
  893. p->array = NULL;
  894. }
  895. /*
  896. * resched_task - mark a task 'to be rescheduled now'.
  897. *
  898. * On UP this means the setting of the need_resched flag, on SMP it
  899. * might also involve a cross-CPU call to trigger the scheduler on
  900. * the target CPU.
  901. */
  902. #ifdef CONFIG_SMP
  903. #ifndef tsk_is_polling
  904. #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
  905. #endif
  906. static void resched_task(struct task_struct *p)
  907. {
  908. int cpu;
  909. assert_spin_locked(&task_rq(p)->lock);
  910. if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
  911. return;
  912. set_tsk_thread_flag(p, TIF_NEED_RESCHED);
  913. cpu = task_cpu(p);
  914. if (cpu == smp_processor_id())
  915. return;
  916. /* NEED_RESCHED must be visible before we test polling */
  917. smp_mb();
  918. if (!tsk_is_polling(p))
  919. smp_send_reschedule(cpu);
  920. }
  921. #else
  922. static inline void resched_task(struct task_struct *p)
  923. {
  924. assert_spin_locked(&task_rq(p)->lock);
  925. set_tsk_need_resched(p);
  926. }
  927. #endif
  928. /**
  929. * task_curr - is this task currently executing on a CPU?
  930. * @p: the task in question.
  931. */
  932. inline int task_curr(const struct task_struct *p)
  933. {
  934. return cpu_curr(task_cpu(p)) == p;
  935. }
  936. /* Used instead of source_load when we know the type == 0 */
  937. unsigned long weighted_cpuload(const int cpu)
  938. {
  939. return cpu_rq(cpu)->raw_weighted_load;
  940. }
  941. #ifdef CONFIG_SMP
  942. struct migration_req {
  943. struct list_head list;
  944. struct task_struct *task;
  945. int dest_cpu;
  946. struct completion done;
  947. };
  948. /*
  949. * The task's runqueue lock must be held.
  950. * Returns true if you have to wait for migration thread.
  951. */
  952. static int
  953. migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
  954. {
  955. struct rq *rq = task_rq(p);
  956. /*
  957. * If the task is not on a runqueue (and not running), then
  958. * it is sufficient to simply update the task's cpu field.
  959. */
  960. if (!p->array && !task_running(rq, p)) {
  961. set_task_cpu(p, dest_cpu);
  962. return 0;
  963. }
  964. init_completion(&req->done);
  965. req->task = p;
  966. req->dest_cpu = dest_cpu;
  967. list_add(&req->list, &rq->migration_queue);
  968. return 1;
  969. }
  970. /*
  971. * wait_task_inactive - wait for a thread to unschedule.
  972. *
  973. * The caller must ensure that the task *will* unschedule sometime soon,
  974. * else this function might spin for a *long* time. This function can't
  975. * be called with interrupts off, or it may introduce deadlock with
  976. * smp_call_function() if an IPI is sent by the same process we are
  977. * waiting to become inactive.
  978. */
  979. void wait_task_inactive(struct task_struct *p)
  980. {
  981. unsigned long flags;
  982. struct rq *rq;
  983. int preempted;
  984. repeat:
  985. rq = task_rq_lock(p, &flags);
  986. /* Must be off runqueue entirely, not preempted. */
  987. if (unlikely(p->array || task_running(rq, p))) {
  988. /* If it's preempted, we yield. It could be a while. */
  989. preempted = !task_running(rq, p);
  990. task_rq_unlock(rq, &flags);
  991. cpu_relax();
  992. if (preempted)
  993. yield();
  994. goto repeat;
  995. }
  996. task_rq_unlock(rq, &flags);
  997. }
  998. /***
  999. * kick_process - kick a running thread to enter/exit the kernel
  1000. * @p: the to-be-kicked thread
  1001. *
  1002. * Cause a process which is running on another CPU to enter
  1003. * kernel-mode, without any delay. (to get signals handled.)
  1004. *
1005. * NOTE: this function doesn't have to take the runqueue lock,
  1006. * because all it wants to ensure is that the remote task enters
  1007. * the kernel. If the IPI races and the task has been migrated
  1008. * to another CPU then no harm is done and the purpose has been
  1009. * achieved as well.
  1010. */
  1011. void kick_process(struct task_struct *p)
  1012. {
  1013. int cpu;
  1014. preempt_disable();
  1015. cpu = task_cpu(p);
  1016. if ((cpu != smp_processor_id()) && task_curr(p))
  1017. smp_send_reschedule(cpu);
  1018. preempt_enable();
  1019. }
  1020. /*
  1021. * Return a low guess at the load of a migration-source cpu weighted
  1022. * according to the scheduling class and "nice" value.
  1023. *
  1024. * We want to under-estimate the load of migration sources, to
  1025. * balance conservatively.
  1026. */
  1027. static inline unsigned long source_load(int cpu, int type)
  1028. {
  1029. struct rq *rq = cpu_rq(cpu);
  1030. if (type == 0)
  1031. return rq->raw_weighted_load;
  1032. return min(rq->cpu_load[type-1], rq->raw_weighted_load);
  1033. }
  1034. /*
  1035. * Return a high guess at the load of a migration-target cpu weighted
  1036. * according to the scheduling class and "nice" value.
  1037. */
  1038. static inline unsigned long target_load(int cpu, int type)
  1039. {
  1040. struct rq *rq = cpu_rq(cpu);
  1041. if (type == 0)
  1042. return rq->raw_weighted_load;
  1043. return max(rq->cpu_load[type-1], rq->raw_weighted_load);
  1044. }
  1045. /*
  1046. * Return the average load per task on the cpu's run queue
  1047. */
  1048. static inline unsigned long cpu_avg_load_per_task(int cpu)
  1049. {
  1050. struct rq *rq = cpu_rq(cpu);
  1051. unsigned long n = rq->nr_running;
  1052. return n ? rq->raw_weighted_load / n : SCHED_LOAD_SCALE;
  1053. }
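/*
 * Example of the bias above, assuming cpu_load[type-1] == 2048 and
 * raw_weighted_load == 1024 on the same cpu (i.e. the instantaneous
 * load has dropped below the decayed average):
 *
 *   source_load() -> min(2048, 1024) == 1024   (under-estimate the source)
 *   target_load() -> max(2048, 1024) == 2048   (over-estimate the target)
 *
 * so both ends of a potential migration are judged conservatively.
 */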
  1054. /*
  1055. * find_idlest_group finds and returns the least busy CPU group within the
  1056. * domain.
  1057. */
  1058. static struct sched_group *
  1059. find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
  1060. {
  1061. struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
  1062. unsigned long min_load = ULONG_MAX, this_load = 0;
  1063. int load_idx = sd->forkexec_idx;
  1064. int imbalance = 100 + (sd->imbalance_pct-100)/2;
  1065. do {
  1066. unsigned long load, avg_load;
  1067. int local_group;
  1068. int i;
  1069. /* Skip over this group if it has no CPUs allowed */
  1070. if (!cpus_intersects(group->cpumask, p->cpus_allowed))
  1071. goto nextgroup;
  1072. local_group = cpu_isset(this_cpu, group->cpumask);
  1073. /* Tally up the load of all CPUs in the group */
  1074. avg_load = 0;
  1075. for_each_cpu_mask(i, group->cpumask) {
  1076. /* Bias balancing toward cpus of our domain */
  1077. if (local_group)
  1078. load = source_load(i, load_idx);
  1079. else
  1080. load = target_load(i, load_idx);
  1081. avg_load += load;
  1082. }
  1083. /* Adjust by relative CPU power of the group */
  1084. avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
  1085. if (local_group) {
  1086. this_load = avg_load;
  1087. this = group;
  1088. } else if (avg_load < min_load) {
  1089. min_load = avg_load;
  1090. idlest = group;
  1091. }
  1092. nextgroup:
  1093. group = group->next;
  1094. } while (group != sd->groups);
  1095. if (!idlest || 100*this_load < imbalance*min_load)
  1096. return NULL;
  1097. return idlest;
  1098. }
  1099. /*
  1100. * find_idlest_cpu - find the idlest cpu among the cpus in group.
  1101. */
  1102. static int
  1103. find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  1104. {
  1105. cpumask_t tmp;
  1106. unsigned long load, min_load = ULONG_MAX;
  1107. int idlest = -1;
  1108. int i;
  1109. /* Traverse only the allowed CPUs */
  1110. cpus_and(tmp, group->cpumask, p->cpus_allowed);
  1111. for_each_cpu_mask(i, tmp) {
  1112. load = weighted_cpuload(i);
  1113. if (load < min_load || (load == min_load && i == this_cpu)) {
  1114. min_load = load;
  1115. idlest = i;
  1116. }
  1117. }
  1118. return idlest;
  1119. }
  1120. /*
  1121. * sched_balance_self: balance the current task (running on cpu) in domains
  1122. * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
  1123. * SD_BALANCE_EXEC.
  1124. *
  1125. * Balance, ie. select the least loaded group.
  1126. *
  1127. * Returns the target CPU number, or the same CPU if no balancing is needed.
  1128. *
  1129. * preempt must be disabled.
  1130. */
  1131. static int sched_balance_self(int cpu, int flag)
  1132. {
  1133. struct task_struct *t = current;
  1134. struct sched_domain *tmp, *sd = NULL;
  1135. for_each_domain(cpu, tmp) {
  1136. /*
  1137. * If power savings logic is enabled for a domain, stop there.
  1138. */
  1139. if (tmp->flags & SD_POWERSAVINGS_BALANCE)
  1140. break;
  1141. if (tmp->flags & flag)
  1142. sd = tmp;
  1143. }
  1144. while (sd) {
  1145. cpumask_t span;
  1146. struct sched_group *group;
  1147. int new_cpu, weight;
  1148. if (!(sd->flags & flag)) {
  1149. sd = sd->child;
  1150. continue;
  1151. }
  1152. span = sd->span;
  1153. group = find_idlest_group(sd, t, cpu);
  1154. if (!group) {
  1155. sd = sd->child;
  1156. continue;
  1157. }
  1158. new_cpu = find_idlest_cpu(group, t, cpu);
  1159. if (new_cpu == -1 || new_cpu == cpu) {
  1160. /* Now try balancing at a lower domain level of cpu */
  1161. sd = sd->child;
  1162. continue;
  1163. }
  1164. /* Now try balancing at a lower domain level of new_cpu */
  1165. cpu = new_cpu;
  1166. sd = NULL;
  1167. weight = cpus_weight(span);
  1168. for_each_domain(cpu, tmp) {
  1169. if (weight <= cpus_weight(tmp->span))
  1170. break;
  1171. if (tmp->flags & flag)
  1172. sd = tmp;
  1173. }
  1174. /* while loop will break here if sd == NULL */
  1175. }
  1176. return cpu;
  1177. }
  1178. #endif /* CONFIG_SMP */
  1179. /*
  1180. * wake_idle() will wake a task on an idle cpu if task->cpu is
  1181. * not idle and an idle cpu is available. The span of cpus to
  1182. * search starts with cpus closest then further out as needed,
  1183. * so we always favor a closer, idle cpu.
  1184. *
  1185. * Returns the CPU we should wake onto.
  1186. */
  1187. #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
  1188. static int wake_idle(int cpu, struct task_struct *p)
  1189. {
  1190. cpumask_t tmp;
  1191. struct sched_domain *sd;
  1192. int i;
  1193. if (idle_cpu(cpu))
  1194. return cpu;
  1195. for_each_domain(cpu, sd) {
  1196. if (sd->flags & SD_WAKE_IDLE) {
  1197. cpus_and(tmp, sd->span, p->cpus_allowed);
  1198. for_each_cpu_mask(i, tmp) {
  1199. if (idle_cpu(i))
  1200. return i;
  1201. }
  1202. }
  1203. else
  1204. break;
  1205. }
  1206. return cpu;
  1207. }
  1208. #else
  1209. static inline int wake_idle(int cpu, struct task_struct *p)
  1210. {
  1211. return cpu;
  1212. }
  1213. #endif
  1214. /***
  1215. * try_to_wake_up - wake up a thread
  1216. * @p: the to-be-woken-up thread
  1217. * @state: the mask of task states that can be woken
  1218. * @sync: do a synchronous wakeup?
  1219. *
  1220. * Put it on the run-queue if it's not already there. The "current"
  1221. * thread is always on the run-queue (except when the actual
  1222. * re-schedule is in progress), and as such you're allowed to do
  1223. * the simpler "current->state = TASK_RUNNING" to mark yourself
  1224. * runnable without the overhead of this.
  1225. *
  1226. * returns failure only if the task is already active.
  1227. */
  1228. static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
  1229. {
  1230. int cpu, this_cpu, success = 0;
  1231. unsigned long flags;
  1232. long old_state;
  1233. struct rq *rq;
  1234. #ifdef CONFIG_SMP
  1235. struct sched_domain *sd, *this_sd = NULL;
  1236. unsigned long load, this_load;
  1237. int new_cpu;
  1238. #endif
  1239. rq = task_rq_lock(p, &flags);
  1240. old_state = p->state;
  1241. if (!(old_state & state))
  1242. goto out;
  1243. if (p->array)
  1244. goto out_running;
  1245. cpu = task_cpu(p);
  1246. this_cpu = smp_processor_id();
  1247. #ifdef CONFIG_SMP
  1248. if (unlikely(task_running(rq, p)))
  1249. goto out_activate;
  1250. new_cpu = cpu;
  1251. schedstat_inc(rq, ttwu_cnt);
  1252. if (cpu == this_cpu) {
  1253. schedstat_inc(rq, ttwu_local);
  1254. goto out_set_cpu;
  1255. }
  1256. for_each_domain(this_cpu, sd) {
  1257. if (cpu_isset(cpu, sd->span)) {
  1258. schedstat_inc(sd, ttwu_wake_remote);
  1259. this_sd = sd;
  1260. break;
  1261. }
  1262. }
  1263. if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
  1264. goto out_set_cpu;
  1265. /*
  1266. * Check for affine wakeup and passive balancing possibilities.
  1267. */
  1268. if (this_sd) {
  1269. int idx = this_sd->wake_idx;
  1270. unsigned int imbalance;
  1271. imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
  1272. load = source_load(cpu, idx);
  1273. this_load = target_load(this_cpu, idx);
  1274. new_cpu = this_cpu; /* Wake to this CPU if we can */
  1275. if (this_sd->flags & SD_WAKE_AFFINE) {
  1276. unsigned long tl = this_load;
  1277. unsigned long tl_per_task = cpu_avg_load_per_task(this_cpu);
  1278. /*
  1279. * If sync wakeup then subtract the (maximum possible)
  1280. * effect of the currently running task from the load
  1281. * of the current CPU:
  1282. */
  1283. if (sync)
  1284. tl -= current->load_weight;
  1285. if ((tl <= load &&
  1286. tl + target_load(cpu, idx) <= tl_per_task) ||
  1287. 100*(tl + p->load_weight) <= imbalance*load) {
  1288. /*
  1289. * This domain has SD_WAKE_AFFINE and
  1290. * p is cache cold in this domain, and
  1291. * there is no bad imbalance.
  1292. */
  1293. schedstat_inc(this_sd, ttwu_move_affine);
  1294. goto out_set_cpu;
  1295. }
  1296. }
  1297. /*
  1298. * Start passive balancing when half the imbalance_pct
  1299. * limit is reached.
  1300. */
  1301. if (this_sd->flags & SD_WAKE_BALANCE) {
  1302. if (imbalance*this_load <= 100*load) {
  1303. schedstat_inc(this_sd, ttwu_move_balance);
  1304. goto out_set_cpu;
  1305. }
  1306. }
  1307. }
  1308. new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
  1309. out_set_cpu:
  1310. new_cpu = wake_idle(new_cpu, p);
  1311. if (new_cpu != cpu) {
  1312. set_task_cpu(p, new_cpu);
  1313. task_rq_unlock(rq, &flags);
  1314. /* might preempt at this point */
  1315. rq = task_rq_lock(p, &flags);
  1316. old_state = p->state;
  1317. if (!(old_state & state))
  1318. goto out;
  1319. if (p->array)
  1320. goto out_running;
  1321. this_cpu = smp_processor_id();
  1322. cpu = task_cpu(p);
  1323. }
  1324. out_activate:
  1325. #endif /* CONFIG_SMP */
  1326. if (old_state == TASK_UNINTERRUPTIBLE) {
  1327. rq->nr_uninterruptible--;
  1328. /*
  1329. * Tasks on involuntary sleep don't earn
  1330. * sleep_avg beyond just interactive state.
  1331. */
  1332. p->sleep_type = SLEEP_NONINTERACTIVE;
  1333. } else
  1334. /*
  1335. * Tasks that have marked their sleep as noninteractive get
  1336. * woken up with their sleep average not weighted in an
  1337. * interactive way.
  1338. */
  1339. if (old_state & TASK_NONINTERACTIVE)
  1340. p->sleep_type = SLEEP_NONINTERACTIVE;
  1341. activate_task(p, rq, cpu == this_cpu);
  1342. /*
  1343. * Sync wakeups (i.e. those types of wakeups where the waker
  1344. * has indicated that it will leave the CPU in short order)
1345. * don't trigger a preemption if the woken up task will run on
  1346. * this cpu. (in this case the 'I will reschedule' promise of
  1347. * the waker guarantees that the freshly woken up task is going
  1348. * to be considered on this CPU.)
  1349. */
  1350. if (!sync || cpu != this_cpu) {
  1351. if (TASK_PREEMPTS_CURR(p, rq))
  1352. resched_task(rq->curr);
  1353. }
  1354. success = 1;
  1355. out_running:
  1356. p->state = TASK_RUNNING;
  1357. out:
  1358. task_rq_unlock(rq, &flags);
  1359. return success;
  1360. }
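/*
 * A worked instance of the second SD_WAKE_AFFINE condition above (the
 * 100*(tl + p->load_weight) <= imbalance*load check), assuming
 * imbalance_pct == 125 (so imbalance == 100 + 25/2 == 112), a source-cpu
 * load of 2048 and tl + p->load_weight == 2048 on this_cpu:
 *
 *   100 * (tl + p->load_weight) == 204800
 *   imbalance * load            == 112 * 2048 == 229376
 *
 * 204800 <= 229376, so the wakeup is allowed to stay affine to this_cpu.
 */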
  1361. int fastcall wake_up_process(struct task_struct *p)
  1362. {
  1363. return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
  1364. TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
  1365. }
  1366. EXPORT_SYMBOL(wake_up_process);
  1367. int fastcall wake_up_state(struct task_struct *p, unsigned int state)
  1368. {
  1369. return try_to_wake_up(p, state, 0);
  1370. }
  1371. /*
  1372. * Perform scheduler related setup for a newly forked process p.
  1373. * p is forked by current.
  1374. */
  1375. void fastcall sched_fork(struct task_struct *p, int clone_flags)
  1376. {
  1377. int cpu = get_cpu();
  1378. #ifdef CONFIG_SMP
  1379. cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
  1380. #endif
  1381. set_task_cpu(p, cpu);
  1382. /*
  1383. * We mark the process as running here, but have not actually
  1384. * inserted it onto the runqueue yet. This guarantees that
  1385. * nobody will actually run it, and a signal or other external
  1386. * event cannot wake it up and insert it on the runqueue either.
  1387. */
  1388. p->state = TASK_RUNNING;
  1389. /*
  1390. * Make sure we do not leak PI boosting priority to the child:
  1391. */
  1392. p->prio = current->normal_prio;
  1393. INIT_LIST_HEAD(&p->run_list);
  1394. p->array = NULL;
  1395. #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
  1396. if (unlikely(sched_info_on()))
  1397. memset(&p->sched_info, 0, sizeof(p->sched_info));
  1398. #endif
  1399. #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
  1400. p->oncpu = 0;
  1401. #endif
  1402. #ifdef CONFIG_PREEMPT
  1403. /* Want to start with kernel preemption disabled. */
  1404. task_thread_info(p)->preempt_count = 1;
  1405. #endif
  1406. /*
  1407. * Share the timeslice between parent and child, thus the
  1408. * total amount of pending timeslices in the system doesn't change,
  1409. * resulting in more scheduling fairness.
  1410. */
  1411. local_irq_disable();
  1412. p->time_slice = (current->time_slice + 1) >> 1;
  1413. /*
  1414. * The remainder of the first timeslice might be recovered by
  1415. * the parent if the child exits early enough.
  1416. */
  1417. p->first_time_slice = 1;
  1418. current->time_slice >>= 1;
  1419. p->timestamp = sched_clock();
  1420. if (unlikely(!current->time_slice)) {
  1421. /*
  1422. * This case is rare, it happens when the parent has only
  1423. * a single jiffy left from its timeslice. Taking the
  1424. * runqueue lock is not a problem.
  1425. */
  1426. current->time_slice = 1;
  1427. scheduler_tick();
  1428. }
  1429. local_irq_enable();
  1430. put_cpu();
  1431. }
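/*
 * Example of the timeslice split above: if the parent has 7 ticks left,
 * the child receives (7 + 1) >> 1 == 4 and the parent keeps 7 >> 1 == 3,
 * so the 7 pending ticks in the system stay 7 after the fork.
 */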
  1432. /*
  1433. * wake_up_new_task - wake up a newly created task for the first time.
  1434. *
  1435. * This function will do some initial scheduler statistics housekeeping
  1436. * that must be done for every newly created context, then puts the task
  1437. * on the runqueue and wakes it.
  1438. */
  1439. void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
  1440. {
  1441. struct rq *rq, *this_rq;
  1442. unsigned long flags;
  1443. int this_cpu, cpu;
  1444. rq = task_rq_lock(p, &flags);
  1445. BUG_ON(p->state != TASK_RUNNING);
  1446. this_cpu = smp_processor_id();
  1447. cpu = task_cpu(p);
  1448. /*
  1449. * We decrease the sleep average of forking parents
  1450. * and children as well, to keep max-interactive tasks
  1451. * from forking tasks that are max-interactive. The parent
  1452. * (current) is done further down, under its lock.
  1453. */
  1454. p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
  1455. CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
  1456. p->prio = effective_prio(p);
  1457. if (likely(cpu == this_cpu)) {
  1458. if (!(clone_flags & CLONE_VM)) {
  1459. /*
  1460. * The VM isn't cloned, so we're in a good position to
  1461. * do child-runs-first in anticipation of an exec. This
  1462. * usually avoids a lot of COW overhead.
  1463. */
  1464. if (unlikely(!current->array))
  1465. __activate_task(p, rq);
  1466. else {
  1467. p->prio = current->prio;
  1468. p->normal_prio = current->normal_prio;
  1469. list_add_tail(&p->run_list, &current->run_list);
  1470. p->array = current->array;
  1471. p->array->nr_active++;
  1472. inc_nr_running(p, rq);
  1473. }
  1474. set_need_resched();
  1475. } else
  1476. /* Run child last */
  1477. __activate_task(p, rq);
  1478. /*
  1479. * We skip the following code due to cpu == this_cpu
  1480. *
  1481. * task_rq_unlock(rq, &flags);
  1482. * this_rq = task_rq_lock(current, &flags);
  1483. */
  1484. this_rq = rq;
  1485. } else {
  1486. this_rq = cpu_rq(this_cpu);
  1487. /*
  1488. * Not the local CPU - must adjust timestamp. This should
  1489. * get optimised away in the !CONFIG_SMP case.
  1490. */
  1491. p->timestamp = (p->timestamp - this_rq->most_recent_timestamp)
  1492. + rq->most_recent_timestamp;
  1493. __activate_task(p, rq);
  1494. if (TASK_PREEMPTS_CURR(p, rq))
  1495. resched_task(rq->curr);
  1496. /*
  1497. * Parent and child are on different CPUs, now get the
  1498. * parent runqueue to update the parent's ->sleep_avg:
  1499. */
  1500. task_rq_unlock(rq, &flags);
  1501. this_rq = task_rq_lock(current, &flags);
  1502. }
  1503. current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
  1504. PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
  1505. task_rq_unlock(this_rq, &flags);
  1506. }
  1507. /*
  1508. * Potentially available exiting-child timeslices are
  1509. * retrieved here - this way the parent does not get
  1510. * penalized for creating too many threads.
  1511. *
  1512. * (this cannot be used to 'generate' timeslices
  1513. * artificially, because any timeslice recovered here
  1514. * was given away by the parent in the first place.)
  1515. */
  1516. void fastcall sched_exit(struct task_struct *p)
  1517. {
  1518. unsigned long flags;
  1519. struct rq *rq;
  1520. /*
  1521. * If the child was a (relative-) CPU hog then decrease
  1522. * the sleep_avg of the parent as well.
  1523. */
  1524. rq = task_rq_lock(p->parent, &flags);
  1525. if (p->first_time_slice && task_cpu(p) == task_cpu(p->parent)) {
  1526. p->parent->time_slice += p->time_slice;
  1527. if (unlikely(p->parent->time_slice > task_timeslice(p)))
  1528. p->parent->time_slice = task_timeslice(p);
  1529. }
  1530. if (p->sleep_avg < p->parent->sleep_avg)
  1531. p->parent->sleep_avg = p->parent->sleep_avg /
  1532. (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
  1533. (EXIT_WEIGHT + 1);
  1534. task_rq_unlock(rq, &flags);
  1535. }
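/*
 * The sleep_avg merge above is a weighted average. Assuming EXIT_WEIGHT
 * were 3, a parent at 800 and an exiting child at 400 would give:
 *
 *   800 / 4 * 3 + 400 / 4 == 600 + 100 == 700
 *
 * i.e. the parent's average is pulled part of the way down towards that
 * of the CPU-hungry child.
 */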
  1536. /**
  1537. * prepare_task_switch - prepare to switch tasks
  1538. * @rq: the runqueue preparing to switch
  1539. * @next: the task we are going to switch to.
  1540. *
  1541. * This is called with the rq lock held and interrupts off. It must
  1542. * be paired with a subsequent finish_task_switch after the context
  1543. * switch.
  1544. *
  1545. * prepare_task_switch sets up locking and calls architecture specific
  1546. * hooks.
  1547. */
  1548. static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
  1549. {
  1550. prepare_lock_switch(rq, next);
  1551. prepare_arch_switch(next);
  1552. }
  1553. /**
  1554. * finish_task_switch - clean up after a task-switch
  1555. * @rq: runqueue associated with task-switch
  1556. * @prev: the thread we just switched away from.
  1557. *
  1558. * finish_task_switch must be called after the context switch, paired
  1559. * with a prepare_task_switch call before the context switch.
  1560. * finish_task_switch will reconcile locking set up by prepare_task_switch,
  1561. * and do any other architecture-specific cleanup actions.
  1562. *
  1563. * Note that we may have delayed dropping an mm in context_switch(). If
  1564. * so, we finish that here outside of the runqueue lock. (Doing it
  1565. * with the lock held can cause deadlocks; see schedule() for
  1566. * details.)
  1567. */
  1568. static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
  1569. __releases(rq->lock)
  1570. {
  1571. struct mm_struct *mm = rq->prev_mm;
  1572. long prev_state;
  1573. rq->prev_mm = NULL;
  1574. /*
  1575. * A task struct has one reference for the use as "current".
  1576. * If a task dies, then it sets TASK_DEAD in tsk->state and calls
  1577. * schedule one last time. The schedule call will never return, and
  1578. * the scheduled task must drop that reference.
  1579. * The test for TASK_DEAD must occur while the runqueue locks are
  1580. * still held, otherwise prev could be scheduled on another cpu, die
  1581. * there before we look at prev->state, and then the reference would
  1582. * be dropped twice.
  1583. * Manfred Spraul <manfred@colorfullife.com>
  1584. */
  1585. prev_state = prev->state;
  1586. finish_arch_switch(prev);
  1587. finish_lock_switch(rq, prev);
  1588. if (mm)
  1589. mmdrop(mm);
  1590. if (unlikely(prev_state == TASK_DEAD)) {
  1591. /*
  1592. * Remove function-return probe instances associated with this
  1593. * task and put them back on the free list.
  1594. */
  1595. kprobe_flush_task(prev);
  1596. put_task_struct(prev);
  1597. }
  1598. }
  1599. /**
  1600. * schedule_tail - first thing a freshly forked thread must call.
  1601. * @prev: the thread we just switched away from.
  1602. */
  1603. asmlinkage void schedule_tail(struct task_struct *prev)
  1604. __releases(rq->lock)
  1605. {
  1606. struct rq *rq = this_rq();
  1607. finish_task_switch(rq, prev);
  1608. #ifdef __ARCH_WANT_UNLOCKED_CTXSW
  1609. /* In this case, finish_task_switch does not reenable preemption */
  1610. preempt_enable();
  1611. #endif
  1612. if (current->set_child_tid)
  1613. put_user(current->pid, current->set_child_tid);
  1614. }
  1615. /*
  1616. * context_switch - switch to the new MM and the new
  1617. * thread's register state.
  1618. */
  1619. static inline struct task_struct *
  1620. context_switch(struct rq *rq, struct task_struct *prev,
  1621. struct task_struct *next)
  1622. {
  1623. struct mm_struct *mm = next->mm;
  1624. struct mm_struct *oldmm = prev->active_mm;
  1625. if (!mm) {
  1626. next->active_mm = oldmm;
  1627. atomic_inc(&oldmm->mm_count);
  1628. enter_lazy_tlb(oldmm, next);
  1629. } else
  1630. switch_mm(oldmm, mm, next);
  1631. if (!prev->mm) {
  1632. prev->active_mm = NULL;
  1633. WARN_ON(rq->prev_mm);
  1634. rq->prev_mm = oldmm;
  1635. }
  1636. /*
1637. * The runqueue lock will be released by the next
1638. * task (which is an invalid locking op, but in the case
1639. * of the scheduler it's an obvious special-case), so we
  1640. * do an early lockdep release here:
  1641. */
  1642. #ifndef __ARCH_WANT_UNLOCKED_CTXSW
  1643. spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  1644. #endif
  1645. /* Here we just switch the register state and the stack. */
  1646. switch_to(prev, next, prev);
  1647. return prev;
  1648. }
  1649. /*
  1650. * nr_running, nr_uninterruptible and nr_context_switches:
  1651. *
  1652. * externally visible scheduler statistics: current number of runnable
  1653. * threads, current number of uninterruptible-sleeping threads, total
  1654. * number of context switches performed since bootup.
  1655. */
  1656. unsigned long nr_running(void)
  1657. {
  1658. unsigned long i, sum = 0;
  1659. for_each_online_cpu(i)
  1660. sum += cpu_rq(i)->nr_running;
  1661. return sum;
  1662. }
  1663. unsigned long nr_uninterruptible(void)
  1664. {
  1665. unsigned long i, sum = 0;
  1666. for_each_possible_cpu(i)
  1667. sum += cpu_rq(i)->nr_uninterruptible;
  1668. /*
  1669. * Since we read the counters lockless, it might be slightly
  1670. * inaccurate. Do not allow it to go below zero though:
  1671. */
  1672. if (unlikely((long)sum < 0))
  1673. sum = 0;
  1674. return sum;
  1675. }
  1676. unsigned long long nr_context_switches(void)
  1677. {
  1678. int i;
  1679. unsigned long long sum = 0;
  1680. for_each_possible_cpu(i)
  1681. sum += cpu_rq(i)->nr_switches;
  1682. return sum;
  1683. }
  1684. unsigned long nr_iowait(void)
  1685. {
  1686. unsigned long i, sum = 0;
  1687. for_each_possible_cpu(i)
  1688. sum += atomic_read(&cpu_rq(i)->nr_iowait);
  1689. return sum;
  1690. }
  1691. unsigned long nr_active(void)
  1692. {
  1693. unsigned long i, running = 0, uninterruptible = 0;
  1694. for_each_online_cpu(i) {
  1695. running += cpu_rq(i)->nr_running;
  1696. uninterruptible += cpu_rq(i)->nr_uninterruptible;
  1697. }
  1698. if (unlikely((long)uninterruptible < 0))
  1699. uninterruptible = 0;
  1700. return running + uninterruptible;
  1701. }
  1702. #ifdef CONFIG_SMP
  1703. /*
  1704. * Is this task likely cache-hot:
  1705. */
  1706. static inline int
  1707. task_hot(struct task_struct *p, unsigned long long now, struct sched_domain *sd)
  1708. {
  1709. return (long long)(now - p->last_ran) < (long long)sd->cache_hot_time;
  1710. }
  1711. /*
  1712. * double_rq_lock - safely lock two runqueues
  1713. *
  1714. * Note this does not disable interrupts like task_rq_lock,
  1715. * you need to do so manually before calling.
  1716. */
  1717. static void double_rq_lock(struct rq *rq1, struct rq *rq2)
  1718. __acquires(rq1->lock)
  1719. __acquires(rq2->lock)
  1720. {
  1721. BUG_ON(!irqs_disabled());
  1722. if (rq1 == rq2) {
  1723. spin_lock(&rq1->lock);
  1724. __acquire(rq2->lock); /* Fake it out ;) */
  1725. } else {
  1726. if (rq1 < rq2) {
  1727. spin_lock(&rq1->lock);
  1728. spin_lock(&rq2->lock);
  1729. } else {
  1730. spin_lock(&rq2->lock);
  1731. spin_lock(&rq1->lock);
  1732. }
  1733. }
  1734. }
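/*
 * The same ordering rule, shown on two bare spinlocks as a sketch: a
 * caller that needs both locks always takes the lower-addressed one
 * first, so two CPUs grabbing the pair in opposite "logical" orders can
 * never deadlock against each other.
 */
#if 0
static void double_spin_lock_sketch(spinlock_t *a, spinlock_t *b)
{
	if (a == b) {
		spin_lock(a);
	} else if (a < b) {
		spin_lock(a);		/* lower address first ... */
		spin_lock(b);
	} else {
		spin_lock(b);		/* ... on both CPUs */
		spin_lock(a);
	}
}
#endif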
  1735. /*
  1736. * double_rq_unlock - safely unlock two runqueues
  1737. *
  1738. * Note this does not restore interrupts like task_rq_unlock,
  1739. * you need to do so manually after calling.
  1740. */
  1741. static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
  1742. __releases(rq1->lock)
  1743. __releases(rq2->lock)
  1744. {
  1745. spin_unlock(&rq1->lock);
  1746. if (rq1 != rq2)
  1747. spin_unlock(&rq2->lock);
  1748. else
  1749. __release(rq2->lock);
  1750. }
  1751. /*
  1752. * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
  1753. */
  1754. static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
  1755. __releases(this_rq->lock)
  1756. __acquires(busiest->lock)
  1757. __acquires(this_rq->lock)
  1758. {
  1759. if (unlikely(!irqs_disabled())) {
1760. /* printk() doesn't work well under rq->lock */
  1761. spin_unlock(&this_rq->lock);
  1762. BUG_ON(1);
  1763. }
  1764. if (unlikely(!spin_trylock(&busiest->lock))) {
  1765. if (busiest < this_rq) {
  1766. spin_unlock(&this_rq->lock);
  1767. spin_lock(&busiest->lock);
  1768. spin_lock(&this_rq->lock);
  1769. } else
  1770. spin_lock(&busiest->lock);
  1771. }
  1772. }
  1773. /*
  1774. * If dest_cpu is allowed for this process, migrate the task to it.
  1775. * This is accomplished by forcing the cpu_allowed mask to only
  1776. * allow dest_cpu, which will force the cpu onto dest_cpu. Then
  1777. * the cpu_allowed mask is restored.
  1778. */
  1779. static void sched_migrate_task(struct task_struct *p, int dest_cpu)
  1780. {
  1781. struct migration_req req;
  1782. unsigned long flags;
  1783. struct rq *rq;
  1784. rq = task_rq_lock(p, &flags);
  1785. if (!cpu_isset(dest_cpu, p->cpus_allowed)
  1786. || unlikely(cpu_is_offline(dest_cpu)))
  1787. goto out;
  1788. /* force the process onto the specified CPU */
  1789. if (migrate_task(p, dest_cpu, &req)) {
  1790. /* Need to wait for migration thread (might exit: take ref). */
  1791. struct task_struct *mt = rq->migration_thread;
  1792. get_task_struct(mt);
  1793. task_rq_unlock(rq, &flags);
  1794. wake_up_process(mt);
  1795. put_task_struct(mt);
  1796. wait_for_completion(&req.done);
  1797. return;
  1798. }
  1799. out:
  1800. task_rq_unlock(rq, &flags);
  1801. }
  1802. /*
  1803. * sched_exec - execve() is a valuable balancing opportunity, because at
  1804. * this point the task has the smallest effective memory and cache footprint.
  1805. */
  1806. void sched_exec(void)
  1807. {
  1808. int new_cpu, this_cpu = get_cpu();
  1809. new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
  1810. put_cpu();
  1811. if (new_cpu != this_cpu)
  1812. sched_migrate_task(current, new_cpu);
  1813. }
  1814. /*
  1815. * pull_task - move a task from a remote runqueue to the local runqueue.
  1816. * Both runqueues must be locked.
  1817. */
  1818. static void pull_task(struct rq *src_rq, struct prio_array *src_array,
  1819. struct task_struct *p, struct rq *this_rq,
  1820. struct prio_array *this_array, int this_cpu)
  1821. {
  1822. dequeue_task(p, src_array);
  1823. dec_nr_running(p, src_rq);
  1824. set_task_cpu(p, this_cpu);
  1825. inc_nr_running(p, this_rq);
  1826. enqueue_task(p, this_array);
  1827. p->timestamp = (p->timestamp - src_rq->most_recent_timestamp)
  1828. + this_rq->most_recent_timestamp;
  1829. /*
1830. * Note that idle threads have a prio of MAX_PRIO, so this test
1831. * is always true for them.
  1832. */
  1833. if (TASK_PREEMPTS_CURR(p, this_rq))
  1834. resched_task(this_rq->curr);
  1835. }
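/*
 * Example of the timestamp rebasing above: if the source runqueue's
 * most_recent_timestamp is 1,000,000ns, the destination's is 990,000ns
 * and the task's timestamp is 995,000ns (source clock), the rebased value
 * is
 *
 *   995,000 - 1,000,000 + 990,000 == 985,000
 *
 * i.e. "5,000ns before now" expressed on the destination cpu's clock.
 */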
  1836. /*
  1837. * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  1838. */
  1839. static
  1840. int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
  1841. struct sched_domain *sd, enum idle_type idle,
  1842. int *all_pinned)
  1843. {
  1844. /*
  1845. * We do not migrate tasks that are:
  1846. * 1) running (obviously), or
  1847. * 2) cannot be migrated to this CPU due to cpus_allowed, or
  1848. * 3) are cache-hot on their current CPU.
  1849. */
  1850. if (!cpu_isset(this_cpu, p->cpus_allowed))
  1851. return 0;
  1852. *all_pinned = 0;
  1853. if (task_running(rq, p))
  1854. return 0;
  1855. /*
  1856. * Aggressive migration if:
  1857. * 1) task is cache cold, or
  1858. * 2) too many balance attempts have failed.
  1859. */
  1860. if (sd->nr_balance_failed > sd->cache_nice_tries) {
  1861. #ifdef CONFIG_SCHEDSTATS
  1862. if (task_hot(p, rq->most_recent_timestamp, sd))
  1863. schedstat_inc(sd, lb_hot_gained[idle]);
  1864. #endif
  1865. return 1;
  1866. }
  1867. if (task_hot(p, rq->most_recent_timestamp, sd))
  1868. return 0;
  1869. return 1;
  1870. }
  1871. #define rq_best_prio(rq) min((rq)->curr->prio, (rq)->best_expired_prio)
  1872. /*
  1873. * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
  1874. * load from busiest to this_rq, as part of a balancing operation within
  1875. * "domain". Returns the number of tasks moved.
  1876. *
  1877. * Called with both runqueues locked.
  1878. */
  1879. static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
  1880. unsigned long max_nr_move, unsigned long max_load_move,
  1881. struct sched_domain *sd, enum idle_type idle,
  1882. int *all_pinned)
  1883. {
  1884. int idx, pulled = 0, pinned = 0, this_best_prio, best_prio,
  1885. best_prio_seen, skip_for_load;
  1886. struct prio_array *array, *dst_array;
  1887. struct list_head *head, *curr;
  1888. struct task_struct *tmp;
  1889. long rem_load_move;
  1890. if (max_nr_move == 0 || max_load_move == 0)
  1891. goto out;
  1892. rem_load_move = max_load_move;
  1893. pinned = 1;
  1894. this_best_prio = rq_best_prio(this_rq);
  1895. best_prio = rq_best_prio(busiest);
  1896. /*
  1897. * Enable handling of the case where there is more than one task
  1898. * with the best priority. If the current running task is one
  1899. * of those with prio==best_prio we know it won't be moved
  1900. * and therefore it's safe to override the skip (based on load) of
  1901. * any task we find with that prio.
  1902. */
  1903. best_prio_seen = best_prio == busiest->curr->prio;
  1904. /*
  1905. * We first consider expired tasks. Those will likely not be
  1906. * executed in the near future, and they are most likely to
  1907. * be cache-cold, thus switching CPUs has the least effect
  1908. * on them.
  1909. */
  1910. if (busiest->expired->nr_active) {
  1911. array = busiest->expired;
  1912. dst_array = this_rq->expired;
  1913. } else {
  1914. array = busiest->active;
  1915. dst_array = this_rq->active;
  1916. }
  1917. new_array:
  1918. /* Start searching at priority 0: */
  1919. idx = 0;
  1920. skip_bitmap:
  1921. if (!idx)
  1922. idx = sched_find_first_bit(array->bitmap);
  1923. else
  1924. idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
  1925. if (idx >= MAX_PRIO) {
  1926. if (array == busiest->expired && busiest->active->nr_active) {
  1927. array = busiest->active;
  1928. dst_array = this_rq->active;
  1929. goto new_array;
  1930. }
  1931. goto out;
  1932. }
  1933. head = array->queue + idx;
  1934. curr = head->prev;
  1935. skip_queue:
  1936. tmp = list_entry(curr, struct task_struct, run_list);
  1937. curr = curr->prev;
  1938. /*
1939. * To help distribute high priority tasks across CPUs we don't
1940. * skip a task if it will be the highest priority task (i.e. smallest
1941. * prio value) on its new queue, regardless of its load weight.
  1942. */
  1943. skip_for_load = tmp->load_weight > rem_load_move;
  1944. if (skip_for_load && idx < this_best_prio)
  1945. skip_for_load = !best_prio_seen && idx == best_prio;
  1946. if (skip_for_load ||
  1947. !can_migrate_task(tmp, busiest, this_cpu, sd, idle, &pinned)) {
  1948. best_prio_seen |= idx == best_prio;
  1949. if (curr != head)
  1950. goto skip_queue;
  1951. idx++;
  1952. goto skip_bitmap;
  1953. }
  1954. pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
  1955. pulled++;
  1956. rem_load_move -= tmp->load_weight;
  1957. /*
  1958. * We only want to steal up to the prescribed number of tasks
  1959. * and the prescribed amount of weighted load.
  1960. */
  1961. if (pulled < max_nr_move && rem_load_move > 0) {
  1962. if (idx < this_best_prio)
  1963. this_best_prio = idx;
  1964. if (curr != head)
  1965. goto skip_queue;
  1966. idx++;
  1967. goto skip_bitmap;
  1968. }
  1969. out:
  1970. /*
  1971. * Right now, this is the only place pull_task() is called,
  1972. * so we can safely collect pull_task() stats here rather than
  1973. * inside pull_task().
  1974. */
  1975. schedstat_add(sd, lb_gained[idle], pulled);
  1976. if (all_pinned)
  1977. *all_pinned = pinned;
  1978. return pulled;
  1979. }
  1980. /*
  1981. * find_busiest_group finds and returns the busiest CPU group within the
  1982. * domain. It calculates and returns the amount of weighted load which
  1983. * should be moved to restore balance via the imbalance parameter.
  1984. */
  1985. static struct sched_group *
  1986. find_busiest_group(struct sched_domain *sd, int this_cpu,
  1987. unsigned long *imbalance, enum idle_type idle, int *sd_idle,
  1988. cpumask_t *cpus, int *balance)
  1989. {
  1990. struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
  1991. unsigned long max_load, avg_load, total_load, this_load, total_pwr;
  1992. unsigned long max_pull;
  1993. unsigned long busiest_load_per_task, busiest_nr_running;
  1994. unsigned long this_load_per_task, this_nr_running;
  1995. int load_idx;
  1996. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
  1997. int power_savings_balance = 1;
  1998. unsigned long leader_nr_running = 0, min_load_per_task = 0;
  1999. unsigned long min_nr_running = ULONG_MAX;
  2000. struct sched_group *group_min = NULL, *group_leader = NULL;
  2001. #endif
  2002. max_load = this_load = total_load = total_pwr = 0;
  2003. busiest_load_per_task = busiest_nr_running = 0;
  2004. this_load_per_task = this_nr_running = 0;
  2005. if (idle == NOT_IDLE)
  2006. load_idx = sd->busy_idx;
  2007. else if (idle == NEWLY_IDLE)
  2008. load_idx = sd->newidle_idx;
  2009. else
  2010. load_idx = sd->idle_idx;
  2011. do {
  2012. unsigned long load, group_capacity;
  2013. int local_group;
  2014. int i;
  2015. unsigned int balance_cpu = -1, first_idle_cpu = 0;
  2016. unsigned long sum_nr_running, sum_weighted_load;
  2017. local_group = cpu_isset(this_cpu, group->cpumask);
  2018. if (local_group)
  2019. balance_cpu = first_cpu(group->cpumask);
  2020. /* Tally up the load of all CPUs in the group */
  2021. sum_weighted_load = sum_nr_running = avg_load = 0;
  2022. for_each_cpu_mask(i, group->cpumask) {
  2023. struct rq *rq;
  2024. if (!cpu_isset(i, *cpus))
  2025. continue;
  2026. rq = cpu_rq(i);
  2027. if (*sd_idle && !idle_cpu(i))
  2028. *sd_idle = 0;
  2029. /* Bias balancing toward cpus of our domain */
  2030. if (local_group) {
  2031. if (idle_cpu(i) && !first_idle_cpu) {
  2032. first_idle_cpu = 1;
  2033. balance_cpu = i;
  2034. }
  2035. load = target_load(i, load_idx);
  2036. } else
  2037. load = source_load(i, load_idx);
  2038. avg_load += load;
  2039. sum_nr_running += rq->nr_running;
  2040. sum_weighted_load += rq->raw_weighted_load;
  2041. }
  2042. /*
2043. * The first idle cpu, or the first cpu (busiest) in this sched group,
2044. * is eligible for doing load balancing at this domain and the
2045. * domains above it.
  2046. */
  2047. if (local_group && balance_cpu != this_cpu && balance) {
  2048. *balance = 0;
  2049. goto ret;
  2050. }
  2051. total_load += avg_load;
  2052. total_pwr += group->cpu_power;
  2053. /* Adjust by relative CPU power of the group */
  2054. avg_load = (avg_load * SCHED_LOAD_SCALE) / group->cpu_power;
  2055. group_capacity = group->cpu_power / SCHED_LOAD_SCALE;
  2056. if (local_group) {
  2057. this_load = avg_load;
  2058. this = group;
  2059. this_nr_running = sum_nr_running;
  2060. this_load_per_task = sum_weighted_load;
  2061. } else if (avg_load > max_load &&
  2062. sum_nr_running > group_capacity) {
  2063. max_load = avg_load;
  2064. busiest = group;
  2065. busiest_nr_running = sum_nr_running;
  2066. busiest_load_per_task = sum_weighted_load;
  2067. }
  2068. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
  2069. /*
  2070. * Busy processors will not participate in power savings
  2071. * balance.
  2072. */
  2073. if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
  2074. goto group_next;
  2075. /*
  2076. * If the local group is idle or completely loaded
  2077. * no need to do power savings balance at this domain
  2078. */
  2079. if (local_group && (this_nr_running >= group_capacity ||
  2080. !this_nr_running))
  2081. power_savings_balance = 0;
  2082. /*
  2083. * If a group is already running at full capacity or idle,
  2084. * don't include that group in power savings calculations
  2085. */
  2086. if (!power_savings_balance || sum_nr_running >= group_capacity
  2087. || !sum_nr_running)
  2088. goto group_next;
  2089. /*
  2090. * Calculate the group which has the least non-idle load.
2091. * This is the group from which we need to pick up the load
  2092. * for saving power
  2093. */
  2094. if ((sum_nr_running < min_nr_running) ||
  2095. (sum_nr_running == min_nr_running &&
  2096. first_cpu(group->cpumask) <
  2097. first_cpu(group_min->cpumask))) {
  2098. group_min = group;
  2099. min_nr_running = sum_nr_running;
  2100. min_load_per_task = sum_weighted_load /
  2101. sum_nr_running;
  2102. }
  2103. /*
2104. * Calculate the group which is nearly at its
2105. * capacity but still has some space to pick up some load
2106. * from another group and save more power
  2107. */
  2108. if (sum_nr_running <= group_capacity - 1) {
  2109. if (sum_nr_running > leader_nr_running ||
  2110. (sum_nr_running == leader_nr_running &&
  2111. first_cpu(group->cpumask) >
  2112. first_cpu(group_leader->cpumask))) {
  2113. group_leader = group;
  2114. leader_nr_running = sum_nr_running;
  2115. }
  2116. }
  2117. group_next:
  2118. #endif
  2119. group = group->next;
  2120. } while (group != sd->groups);
  2121. if (!busiest || this_load >= max_load || busiest_nr_running == 0)
  2122. goto out_balanced;
  2123. avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
  2124. if (this_load >= avg_load ||
  2125. 100*max_load <= sd->imbalance_pct*this_load)
  2126. goto out_balanced;
  2127. busiest_load_per_task /= busiest_nr_running;
  2128. /*
  2129. * We're trying to get all the cpus to the average_load, so we don't
  2130. * want to push ourselves above the average load, nor do we wish to
  2131. * reduce the max loaded cpu below the average load, as either of these
  2132. * actions would just result in more rebalancing later, and ping-pong
  2133. * tasks around. Thus we look for the minimum possible imbalance.
  2134. * Negative imbalances (*we* are more loaded than anyone else) will
  2135. * be counted as no imbalance for these purposes -- we can't fix that
  2136. * by pulling tasks to us. Be careful of negative numbers as they'll
  2137. * appear as very large values with unsigned longs.
  2138. */
  2139. if (max_load <= busiest_load_per_task)
  2140. goto out_balanced;
  2141. /*
  2142. * In the presence of smp nice balancing, certain scenarios can have
2143. * max load less than avg load (as groups at or below their
2144. * cpu_power are skipped while calculating max_load).
  2145. */
  2146. if (max_load < avg_load) {
  2147. *imbalance = 0;
  2148. goto small_imbalance;
  2149. }
  2150. /* Don't want to pull so many tasks that a group would go idle */
  2151. max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
  2152. /* How much load to actually move to equalise the imbalance */
  2153. *imbalance = min(max_pull * busiest->cpu_power,
  2154. (avg_load - this_load) * this->cpu_power)
  2155. / SCHED_LOAD_SCALE;
  2156. /*
2157. * If *imbalance is less than the average load per runnable task,
2158. * there is no guarantee that any tasks will be moved, so we
2159. * consider bumping its value to force at least one task to be
2160. * moved.
  2161. */
  2162. if (*imbalance < busiest_load_per_task) {
  2163. unsigned long tmp, pwr_now, pwr_move;
  2164. unsigned int imbn;
  2165. small_imbalance:
  2166. pwr_move = pwr_now = 0;
  2167. imbn = 2;
  2168. if (this_nr_running) {
  2169. this_load_per_task /= this_nr_running;
  2170. if (busiest_load_per_task > this_load_per_task)
  2171. imbn = 1;
  2172. } else
  2173. this_load_per_task = SCHED_LOAD_SCALE;
  2174. if (max_load - this_load >= busiest_load_per_task * imbn) {
  2175. *imbalance = busiest_load_per_task;
  2176. return busiest;
  2177. }
  2178. /*
  2179. * OK, we don't have enough imbalance to justify moving tasks,
  2180. * however we may be able to increase total CPU power used by
  2181. * moving them.
  2182. */
  2183. pwr_now += busiest->cpu_power *
  2184. min(busiest_load_per_task, max_load);
  2185. pwr_now += this->cpu_power *
  2186. min(this_load_per_task, this_load);
  2187. pwr_now /= SCHED_LOAD_SCALE;
  2188. /* Amount of load we'd subtract */
  2189. tmp = busiest_load_per_task*SCHED_LOAD_SCALE/busiest->cpu_power;
  2190. if (max_load > tmp)
  2191. pwr_move += busiest->cpu_power *
  2192. min(busiest_load_per_task, max_load - tmp);
  2193. /* Amount of load we'd add */
  2194. if (max_load*busiest->cpu_power <
  2195. busiest_load_per_task*SCHED_LOAD_SCALE)
  2196. tmp = max_load*busiest->cpu_power/this->cpu_power;
  2197. else
  2198. tmp = busiest_load_per_task*SCHED_LOAD_SCALE/this->cpu_power;
  2199. pwr_move += this->cpu_power*min(this_load_per_task, this_load + tmp);
  2200. pwr_move /= SCHED_LOAD_SCALE;
  2201. /* Move if we gain throughput */
  2202. if (pwr_move <= pwr_now)
  2203. goto out_balanced;
  2204. *imbalance = busiest_load_per_task;
  2205. }
  2206. return busiest;
  2207. out_balanced:
  2208. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
  2209. if (idle == NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
  2210. goto ret;
  2211. if (this == group_leader && group_leader != group_min) {
  2212. *imbalance = min_load_per_task;
  2213. return group_min;
  2214. }
  2215. #endif
  2216. ret:
  2217. *imbalance = 0;
  2218. return NULL;
  2219. }
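/*
 * A worked instance of the imbalance computation above, assuming
 * SCHED_LOAD_SCALE == 128 and two single-cpu groups of cpu_power 128
 * (so group loads need no rescaling): the busiest group carries a
 * weighted load of 3072 spread over three tasks
 * (busiest_load_per_task == 1024), the local group carries 1024.
 *
 *   avg_load   = 128 * (3072 + 1024) / 256                   == 2048
 *   max_pull   = min(3072 - 2048, 3072 - 1024)               == 1024
 *   *imbalance = min(1024 * 128, (2048 - 1024) * 128) / 128  == 1024
 *
 * i.e. move roughly one of the three tasks, leaving 2048 against 2048.
 */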
  2220. /*
  2221. * find_busiest_queue - find the busiest runqueue among the cpus in group.
  2222. */
  2223. static struct rq *
  2224. find_busiest_queue(struct sched_group *group, enum idle_type idle,
  2225. unsigned long imbalance, cpumask_t *cpus)
  2226. {
  2227. struct rq *busiest = NULL, *rq;
  2228. unsigned long max_load = 0;
  2229. int i;
  2230. for_each_cpu_mask(i, group->cpumask) {
  2231. if (!cpu_isset(i, *cpus))
  2232. continue;
  2233. rq = cpu_rq(i);
  2234. if (rq->nr_running == 1 && rq->raw_weighted_load > imbalance)
  2235. continue;
  2236. if (rq->raw_weighted_load > max_load) {
  2237. max_load = rq->raw_weighted_load;
  2238. busiest = rq;
  2239. }
  2240. }
  2241. return busiest;
  2242. }
  2243. /*
  2244. * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
  2245. * so long as it is large enough.
  2246. */
  2247. #define MAX_PINNED_INTERVAL 512
  2248. static inline unsigned long minus_1_or_zero(unsigned long n)
  2249. {
  2250. return n > 0 ? n - 1 : 0;
  2251. }
  2252. /*
  2253. * Check this_cpu to ensure it is balanced within domain. Attempt to move
  2254. * tasks if there is an imbalance.
  2255. */
  2256. static int load_balance(int this_cpu, struct rq *this_rq,
  2257. struct sched_domain *sd, enum idle_type idle,
  2258. int *balance)
  2259. {
  2260. int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
  2261. struct sched_group *group;
  2262. unsigned long imbalance;
  2263. struct rq *busiest;
  2264. cpumask_t cpus = CPU_MASK_ALL;
  2265. unsigned long flags;
  2266. /*
2267. * When power savings policy is enabled for the parent domain, an idle
2268. * sibling can pick up load irrespective of the busy siblings. In this
2269. * case, let the state of the idle sibling percolate up as IDLE, instead
2270. * of portraying it as NOT_IDLE.
  2271. */
  2272. if (idle != NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
  2273. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  2274. sd_idle = 1;
  2275. schedstat_inc(sd, lb_cnt[idle]);
  2276. redo:
  2277. group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
  2278. &cpus, balance);
  2279. if (*balance == 0)
  2280. goto out_balanced;
  2281. if (!group) {
  2282. schedstat_inc(sd, lb_nobusyg[idle]);
  2283. goto out_balanced;
  2284. }
  2285. busiest = find_busiest_queue(group, idle, imbalance, &cpus);
  2286. if (!busiest) {
  2287. schedstat_inc(sd, lb_nobusyq[idle]);
  2288. goto out_balanced;
  2289. }
  2290. BUG_ON(busiest == this_rq);
  2291. schedstat_add(sd, lb_imbalance[idle], imbalance);
  2292. nr_moved = 0;
  2293. if (busiest->nr_running > 1) {
  2294. /*
  2295. * Attempt to move tasks. If find_busiest_group has found
  2296. * an imbalance but busiest->nr_running <= 1, the group is
  2297. * still unbalanced. nr_moved simply stays zero, so it is
  2298. * correctly treated as an imbalance.
  2299. */
  2300. local_irq_save(flags);
  2301. double_rq_lock(this_rq, busiest);
  2302. nr_moved = move_tasks(this_rq, this_cpu, busiest,
  2303. minus_1_or_zero(busiest->nr_running),
  2304. imbalance, sd, idle, &all_pinned);
  2305. double_rq_unlock(this_rq, busiest);
  2306. local_irq_restore(flags);
  2307. /* All tasks on this runqueue were pinned by CPU affinity */
  2308. if (unlikely(all_pinned)) {
  2309. cpu_clear(cpu_of(busiest), cpus);
  2310. if (!cpus_empty(cpus))
  2311. goto redo;
  2312. goto out_balanced;
  2313. }
  2314. }
  2315. if (!nr_moved) {
  2316. schedstat_inc(sd, lb_failed[idle]);
  2317. sd->nr_balance_failed++;
  2318. if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
  2319. spin_lock_irqsave(&busiest->lock, flags);
2320. /* don't kick the migration_thread if the curr
2321. * task on the busiest cpu can't be moved to this_cpu
  2322. */
  2323. if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
  2324. spin_unlock_irqrestore(&busiest->lock, flags);
  2325. all_pinned = 1;
  2326. goto out_one_pinned;
  2327. }
  2328. if (!busiest->active_balance) {
  2329. busiest->active_balance = 1;
  2330. busiest->push_cpu = this_cpu;
  2331. active_balance = 1;
  2332. }
  2333. spin_unlock_irqrestore(&busiest->lock, flags);
  2334. if (active_balance)
  2335. wake_up_process(busiest->migration_thread);
  2336. /*
  2337. * We've kicked active balancing, reset the failure
  2338. * counter.
  2339. */
  2340. sd->nr_balance_failed = sd->cache_nice_tries+1;
  2341. }
  2342. } else
  2343. sd->nr_balance_failed = 0;
  2344. if (likely(!active_balance)) {
  2345. /* We were unbalanced, so reset the balancing interval */
  2346. sd->balance_interval = sd->min_interval;
  2347. } else {
  2348. /*
  2349. * If we've begun active balancing, start to back off. This
  2350. * case may not be covered by the all_pinned logic if there
  2351. * is only 1 task on the busy runqueue (because we don't call
  2352. * move_tasks).
  2353. */
  2354. if (sd->balance_interval < sd->max_interval)
  2355. sd->balance_interval *= 2;
  2356. }
  2357. if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  2358. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  2359. return -1;
  2360. return nr_moved;
  2361. out_balanced:
  2362. schedstat_inc(sd, lb_balanced[idle]);
  2363. sd->nr_balance_failed = 0;
  2364. out_one_pinned:
  2365. /* tune up the balancing interval */
  2366. if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
  2367. (sd->balance_interval < sd->max_interval))
  2368. sd->balance_interval *= 2;
  2369. if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  2370. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  2371. return -1;
  2372. return 0;
  2373. }
  2374. /*
  2375. * Check this_cpu to ensure it is balanced within domain. Attempt to move
  2376. * tasks if there is an imbalance.
  2377. *
  2378. * Called from schedule when this_rq is about to become idle (NEWLY_IDLE).
  2379. * this_rq is locked.
  2380. */
  2381. static int
  2382. load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
  2383. {
  2384. struct sched_group *group;
  2385. struct rq *busiest = NULL;
  2386. unsigned long imbalance;
  2387. int nr_moved = 0;
  2388. int sd_idle = 0;
  2389. cpumask_t cpus = CPU_MASK_ALL;
/*
 * When the power savings policy is enabled for the parent domain, an idle
 * sibling can pick up load irrespective of busy siblings. In this case,
 * let the state of the idle sibling percolate up as IDLE, instead of
 * portraying it as NOT_IDLE.
 */
  2396. if (sd->flags & SD_SHARE_CPUPOWER &&
  2397. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  2398. sd_idle = 1;
  2399. schedstat_inc(sd, lb_cnt[NEWLY_IDLE]);
  2400. redo:
  2401. group = find_busiest_group(sd, this_cpu, &imbalance, NEWLY_IDLE,
  2402. &sd_idle, &cpus, NULL);
  2403. if (!group) {
  2404. schedstat_inc(sd, lb_nobusyg[NEWLY_IDLE]);
  2405. goto out_balanced;
  2406. }
  2407. busiest = find_busiest_queue(group, NEWLY_IDLE, imbalance,
  2408. &cpus);
  2409. if (!busiest) {
  2410. schedstat_inc(sd, lb_nobusyq[NEWLY_IDLE]);
  2411. goto out_balanced;
  2412. }
  2413. BUG_ON(busiest == this_rq);
  2414. schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
  2415. nr_moved = 0;
  2416. if (busiest->nr_running > 1) {
  2417. /* Attempt to move tasks */
  2418. double_lock_balance(this_rq, busiest);
  2419. nr_moved = move_tasks(this_rq, this_cpu, busiest,
  2420. minus_1_or_zero(busiest->nr_running),
  2421. imbalance, sd, NEWLY_IDLE, NULL);
  2422. spin_unlock(&busiest->lock);
  2423. if (!nr_moved) {
  2424. cpu_clear(cpu_of(busiest), cpus);
  2425. if (!cpus_empty(cpus))
  2426. goto redo;
  2427. }
  2428. }
  2429. if (!nr_moved) {
  2430. schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
  2431. if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  2432. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  2433. return -1;
  2434. } else
  2435. sd->nr_balance_failed = 0;
  2436. return nr_moved;
  2437. out_balanced:
  2438. schedstat_inc(sd, lb_balanced[NEWLY_IDLE]);
  2439. if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  2440. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  2441. return -1;
  2442. sd->nr_balance_failed = 0;
  2443. return 0;
  2444. }
  2445. /*
  2446. * idle_balance is called by schedule() if this_cpu is about to become
  2447. * idle. Attempts to pull tasks from other CPUs.
  2448. */
  2449. static void idle_balance(int this_cpu, struct rq *this_rq)
  2450. {
  2451. struct sched_domain *sd;
  2452. int pulled_task = 0;
  2453. unsigned long next_balance = jiffies + 60 * HZ;
  2454. for_each_domain(this_cpu, sd) {
  2455. if (sd->flags & SD_BALANCE_NEWIDLE) {
  2456. /* If we've pulled tasks over stop searching: */
  2457. pulled_task = load_balance_newidle(this_cpu,
  2458. this_rq, sd);
  2459. if (time_after(next_balance,
  2460. sd->last_balance + sd->balance_interval))
  2461. next_balance = sd->last_balance
  2462. + sd->balance_interval;
  2463. if (pulled_task)
  2464. break;
  2465. }
  2466. }
  2467. if (!pulled_task)
  2468. /*
  2469. * We are going idle. next_balance may be set based on
  2470. * a busy processor. So reset next_balance.
  2471. */
  2472. this_rq->next_balance = next_balance;
  2473. }
  2474. /*
  2475. * active_load_balance is run by migration threads. It pushes running tasks
  2476. * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
  2477. * running on each physical CPU where possible, and avoids physical /
  2478. * logical imbalances.
  2479. *
  2480. * Called with busiest_rq locked.
  2481. */
  2482. static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
  2483. {
  2484. int target_cpu = busiest_rq->push_cpu;
  2485. struct sched_domain *sd;
  2486. struct rq *target_rq;
  2487. /* Is there any task to move? */
  2488. if (busiest_rq->nr_running <= 1)
  2489. return;
  2490. target_rq = cpu_rq(target_cpu);
/*
 * This condition is "impossible"; if it occurs
 * we need to fix it. Originally reported by
 * Bjorn Helgaas on a 128-cpu setup.
 */
  2496. BUG_ON(busiest_rq == target_rq);
  2497. /* move a task from busiest_rq to target_rq */
  2498. double_lock_balance(busiest_rq, target_rq);
  2499. /* Search for an sd spanning us and the target CPU. */
  2500. for_each_domain(target_cpu, sd) {
  2501. if ((sd->flags & SD_LOAD_BALANCE) &&
  2502. cpu_isset(busiest_cpu, sd->span))
  2503. break;
  2504. }
  2505. if (likely(sd)) {
  2506. schedstat_inc(sd, alb_cnt);
  2507. if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
  2508. RTPRIO_TO_LOAD_WEIGHT(100), sd, SCHED_IDLE,
  2509. NULL))
  2510. schedstat_inc(sd, alb_pushed);
  2511. else
  2512. schedstat_inc(sd, alb_failed);
  2513. }
  2514. spin_unlock(&target_rq->lock);
  2515. }
  2516. static void update_load(struct rq *this_rq)
  2517. {
  2518. unsigned long this_load;
  2519. int i, scale;
  2520. this_load = this_rq->raw_weighted_load;
  2521. /* Update our load: */
  2522. for (i = 0, scale = 1; i < 3; i++, scale <<= 1) {
  2523. unsigned long old_load, new_load;
  2524. old_load = this_rq->cpu_load[i];
  2525. new_load = this_load;
  2526. /*
  2527. * Round up the averaging division if load is increasing. This
  2528. * prevents us from getting stuck on 9 if the load is 10, for
  2529. * example.
  2530. */
  2531. if (new_load > old_load)
  2532. new_load += scale-1;
  2533. this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale;
  2534. }
  2535. }
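/*
 * A worked example (illustrative only) of the decay above and of why the
 * round-up matters. Assume a steady raw_weighted_load of 10 and an old
 * cpu_load[1] of 9 (i == 1, so scale == 2):
 *
 *   without the round-up:  (9*1 + 10) / 2 = 9   (integer division never
 *                                                reaches the real load)
 *   with the round-up:     new_load = 10 + (2-1) = 11
 *                          (9*1 + 11) / 2 = 10  (converges to the load)
 *
 * Each higher index doubles 'scale', so cpu_load[0] tracks the
 * instantaneous load while cpu_load[2] is the slowest moving average.
 */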
  2536. /*
  2537. * run_rebalance_domains is triggered when needed from the scheduler tick.
  2538. *
  2539. * It checks each scheduling domain to see if it is due to be balanced,
  2540. * and initiates a balancing operation if so.
  2541. *
  2542. * Balancing parameters are set up in arch_init_sched_domains.
  2543. */
  2544. static DEFINE_SPINLOCK(balancing);
  2545. static void run_rebalance_domains(struct softirq_action *h)
  2546. {
  2547. int this_cpu = smp_processor_id(), balance = 1;
  2548. struct rq *this_rq = cpu_rq(this_cpu);
  2549. unsigned long interval;
  2550. struct sched_domain *sd;
  2551. /*
  2552. * We are idle if there are no processes running. This
  2553. * is valid even if we are the idle process (SMT).
  2554. */
  2555. enum idle_type idle = !this_rq->nr_running ?
  2556. SCHED_IDLE : NOT_IDLE;
  2557. /* Earliest time when we have to call run_rebalance_domains again */
  2558. unsigned long next_balance = jiffies + 60*HZ;
  2559. for_each_domain(this_cpu, sd) {
  2560. if (!(sd->flags & SD_LOAD_BALANCE))
  2561. continue;
  2562. interval = sd->balance_interval;
  2563. if (idle != SCHED_IDLE)
  2564. interval *= sd->busy_factor;
  2565. /* scale ms to jiffies */
  2566. interval = msecs_to_jiffies(interval);
  2567. if (unlikely(!interval))
  2568. interval = 1;
  2569. if (sd->flags & SD_SERIALIZE) {
  2570. if (!spin_trylock(&balancing))
  2571. goto out;
  2572. }
  2573. if (time_after_eq(jiffies, sd->last_balance + interval)) {
  2574. if (load_balance(this_cpu, this_rq, sd, idle, &balance)) {
  2575. /*
  2576. * We've pulled tasks over so either we're no
  2577. * longer idle, or one of our SMT siblings is
  2578. * not idle.
  2579. */
  2580. idle = NOT_IDLE;
  2581. }
  2582. sd->last_balance = jiffies;
  2583. }
  2584. if (sd->flags & SD_SERIALIZE)
  2585. spin_unlock(&balancing);
  2586. out:
  2587. if (time_after(next_balance, sd->last_balance + interval))
  2588. next_balance = sd->last_balance + interval;
  2589. /*
  2590. * Stop the load balance at this level. There is another
  2591. * CPU in our sched group which is doing load balancing more
  2592. * actively.
  2593. */
  2594. if (!balance)
  2595. break;
  2596. }
  2597. this_rq->next_balance = next_balance;
  2598. }
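/*
 * A worked example (illustrative, with assumed parameters) of the interval
 * scaling above: for a domain with balance_interval = 64 (ms) and
 * busy_factor = 32, a busy CPU attempts a rebalance of that domain at most
 * about every 2 seconds (64 ms * 32), while an idle CPU checks it every
 * 64 ms, subject to msecs_to_jiffies() rounding.
 */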
  2599. #else
  2600. /*
  2601. * on UP we do not need to balance between CPUs:
  2602. */
  2603. static inline void idle_balance(int cpu, struct rq *rq)
  2604. {
  2605. }
  2606. #endif
  2607. static inline void wake_priority_sleeper(struct rq *rq)
  2608. {
  2609. #ifdef CONFIG_SCHED_SMT
  2610. if (!rq->nr_running)
  2611. return;
  2612. spin_lock(&rq->lock);
  2613. /*
  2614. * If an SMT sibling task has been put to sleep for priority
  2615. * reasons reschedule the idle task to see if it can now run.
  2616. */
  2617. if (rq->nr_running)
  2618. resched_task(rq->idle);
  2619. spin_unlock(&rq->lock);
  2620. #endif
  2621. }
  2622. DEFINE_PER_CPU(struct kernel_stat, kstat);
  2623. EXPORT_PER_CPU_SYMBOL(kstat);
  2624. /*
  2625. * This is called on clock ticks and on context switches.
  2626. * Bank in p->sched_time the ns elapsed since the last tick or switch.
  2627. */
  2628. static inline void
  2629. update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
  2630. {
  2631. p->sched_time += now - p->last_ran;
  2632. p->last_ran = rq->most_recent_timestamp = now;
  2633. }
  2634. /*
  2635. * Return current->sched_time plus any more ns on the sched_clock
  2636. * that have not yet been banked.
  2637. */
  2638. unsigned long long current_sched_time(const struct task_struct *p)
  2639. {
  2640. unsigned long long ns;
  2641. unsigned long flags;
  2642. local_irq_save(flags);
  2643. ns = p->sched_time + sched_clock() - p->last_ran;
  2644. local_irq_restore(flags);
  2645. return ns;
  2646. }
/*
 * We place interactive tasks back into the active array, if possible.
 *
 * To guarantee that this does not starve expired tasks we ignore the
 * interactivity of a task if the first expired task had to wait more
 * than a 'reasonable' amount of time. This deadline timeout is
 * load-dependent, as the frequency of array switches decreases with
 * an increasing number of running tasks. We also ignore the interactivity
 * if a better static_prio task has expired:
 */
  2657. static inline int expired_starving(struct rq *rq)
  2658. {
  2659. if (rq->curr->static_prio > rq->best_expired_prio)
  2660. return 1;
  2661. if (!STARVATION_LIMIT || !rq->expired_timestamp)
  2662. return 0;
  2663. if (jiffies - rq->expired_timestamp > STARVATION_LIMIT * rq->nr_running)
  2664. return 1;
  2665. return 0;
  2666. }
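/*
 * A worked example (illustrative, with an assumed limit): with a
 * STARVATION_LIMIT of one second's worth of jiffies and nr_running == 4,
 * interactive reinsertion into the active array is suppressed once the
 * oldest expired task has waited more than 4 seconds.
 */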
/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 */
  2673. void account_user_time(struct task_struct *p, cputime_t cputime)
  2674. {
  2675. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  2676. cputime64_t tmp;
  2677. p->utime = cputime_add(p->utime, cputime);
  2678. /* Add user time to cpustat. */
  2679. tmp = cputime_to_cputime64(cputime);
  2680. if (TASK_NICE(p) > 0)
  2681. cpustat->nice = cputime64_add(cpustat->nice, tmp);
  2682. else
  2683. cpustat->user = cputime64_add(cpustat->user, tmp);
  2684. }
  2685. /*
  2686. * Account system cpu time to a process.
  2687. * @p: the process that the cpu time gets accounted to
  2688. * @hardirq_offset: the offset to subtract from hardirq_count()
  2689. * @cputime: the cpu time spent in kernel space since the last update
  2690. */
  2691. void account_system_time(struct task_struct *p, int hardirq_offset,
  2692. cputime_t cputime)
  2693. {
  2694. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  2695. struct rq *rq = this_rq();
  2696. cputime64_t tmp;
  2697. p->stime = cputime_add(p->stime, cputime);
  2698. /* Add system time to cpustat. */
  2699. tmp = cputime_to_cputime64(cputime);
  2700. if (hardirq_count() - hardirq_offset)
  2701. cpustat->irq = cputime64_add(cpustat->irq, tmp);
  2702. else if (softirq_count())
  2703. cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
  2704. else if (p != rq->idle)
  2705. cpustat->system = cputime64_add(cpustat->system, tmp);
  2706. else if (atomic_read(&rq->nr_iowait) > 0)
  2707. cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
  2708. else
  2709. cpustat->idle = cputime64_add(cpustat->idle, tmp);
  2710. /* Account for system time used */
  2711. acct_update_integrals(p);
  2712. }
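/*
 * The classification above is a strict precedence; for example a tick that
 * lands while a softirq runs on behalf of the idle task is charged as
 * softirq time, not idle time:
 *
 *   hardirq context          -> cpustat->irq
 *   softirq context          -> cpustat->softirq
 *   any non-idle task        -> cpustat->system
 *   idle with I/O pending    -> cpustat->iowait
 *   idle otherwise           -> cpustat->idle
 */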
  2713. /*
  2714. * Account for involuntary wait time.
  2715. * @p: the process from which the cpu time has been stolen
  2716. * @steal: the cpu time spent in involuntary wait
  2717. */
  2718. void account_steal_time(struct task_struct *p, cputime_t steal)
  2719. {
  2720. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  2721. cputime64_t tmp = cputime_to_cputime64(steal);
  2722. struct rq *rq = this_rq();
  2723. if (p == rq->idle) {
  2724. p->stime = cputime_add(p->stime, steal);
  2725. if (atomic_read(&rq->nr_iowait) > 0)
  2726. cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
  2727. else
  2728. cpustat->idle = cputime64_add(cpustat->idle, tmp);
  2729. } else
  2730. cpustat->steal = cputime64_add(cpustat->steal, tmp);
  2731. }
  2732. static void task_running_tick(struct rq *rq, struct task_struct *p)
  2733. {
  2734. if (p->array != rq->active) {
  2735. /* Task has expired but was not scheduled yet */
  2736. set_tsk_need_resched(p);
  2737. return;
  2738. }
  2739. spin_lock(&rq->lock);
  2740. /*
  2741. * The task was running during this tick - update the
  2742. * time slice counter. Note: we do not update a thread's
  2743. * priority until it either goes to sleep or uses up its
  2744. * timeslice. This makes it possible for interactive tasks
  2745. * to use up their timeslices at their highest priority levels.
  2746. */
  2747. if (rt_task(p)) {
  2748. /*
  2749. * RR tasks need a special form of timeslice management.
  2750. * FIFO tasks have no timeslices.
  2751. */
  2752. if ((p->policy == SCHED_RR) && !--p->time_slice) {
  2753. p->time_slice = task_timeslice(p);
  2754. p->first_time_slice = 0;
  2755. set_tsk_need_resched(p);
  2756. /* put it at the end of the queue: */
  2757. requeue_task(p, rq->active);
  2758. }
  2759. goto out_unlock;
  2760. }
  2761. if (!--p->time_slice) {
  2762. dequeue_task(p, rq->active);
  2763. set_tsk_need_resched(p);
  2764. p->prio = effective_prio(p);
  2765. p->time_slice = task_timeslice(p);
  2766. p->first_time_slice = 0;
  2767. if (!rq->expired_timestamp)
  2768. rq->expired_timestamp = jiffies;
  2769. if (!TASK_INTERACTIVE(p) || expired_starving(rq)) {
  2770. enqueue_task(p, rq->expired);
  2771. if (p->static_prio < rq->best_expired_prio)
  2772. rq->best_expired_prio = p->static_prio;
  2773. } else
  2774. enqueue_task(p, rq->active);
  2775. } else {
  2776. /*
  2777. * Prevent a too long timeslice allowing a task to monopolize
  2778. * the CPU. We do this by splitting up the timeslice into
  2779. * smaller pieces.
  2780. *
  2781. * Note: this does not mean the task's timeslices expire or
  2782. * get lost in any way, they just might be preempted by
  2783. * another task of equal priority. (one with higher
  2784. * priority would have preempted this task already.) We
  2785. * requeue this task to the end of the list on this priority
  2786. * level, which is in essence a round-robin of tasks with
  2787. * equal priority.
  2788. *
  2789. * This only applies to tasks in the interactive
  2790. * delta range with at least TIMESLICE_GRANULARITY to requeue.
  2791. */
  2792. if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
  2793. p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
  2794. (p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
  2795. (p->array == rq->active)) {
  2796. requeue_task(p, rq->active);
  2797. set_tsk_need_resched(p);
  2798. }
  2799. }
  2800. out_unlock:
  2801. spin_unlock(&rq->lock);
  2802. }
  2803. /*
  2804. * This function gets called by the timer code, with HZ frequency.
  2805. * We call it with interrupts disabled.
  2806. *
  2807. * It also gets called by the fork code, when changing the parent's
  2808. * timeslices.
  2809. */
  2810. void scheduler_tick(void)
  2811. {
  2812. unsigned long long now = sched_clock();
  2813. struct task_struct *p = current;
  2814. int cpu = smp_processor_id();
  2815. struct rq *rq = cpu_rq(cpu);
  2816. update_cpu_clock(p, rq, now);
  2817. if (p == rq->idle)
  2818. /* Task on the idle queue */
  2819. wake_priority_sleeper(rq);
  2820. else
  2821. task_running_tick(rq, p);
  2822. #ifdef CONFIG_SMP
  2823. update_load(rq);
  2824. if (time_after_eq(jiffies, rq->next_balance))
  2825. raise_softirq(SCHED_SOFTIRQ);
  2826. #endif
  2827. }
  2828. #ifdef CONFIG_SCHED_SMT
  2829. static inline void wakeup_busy_runqueue(struct rq *rq)
  2830. {
  2831. /* If an SMT runqueue is sleeping due to priority reasons wake it up */
  2832. if (rq->curr == rq->idle && rq->nr_running)
  2833. resched_task(rq->idle);
  2834. }
  2835. /*
 * Called with interrupts disabled and this_rq's runqueue locked.
  2837. */
  2838. static void wake_sleeping_dependent(int this_cpu)
  2839. {
  2840. struct sched_domain *tmp, *sd = NULL;
  2841. int i;
  2842. for_each_domain(this_cpu, tmp) {
  2843. if (tmp->flags & SD_SHARE_CPUPOWER) {
  2844. sd = tmp;
  2845. break;
  2846. }
  2847. }
  2848. if (!sd)
  2849. return;
  2850. for_each_cpu_mask(i, sd->span) {
  2851. struct rq *smt_rq = cpu_rq(i);
  2852. if (i == this_cpu)
  2853. continue;
  2854. if (unlikely(!spin_trylock(&smt_rq->lock)))
  2855. continue;
  2856. wakeup_busy_runqueue(smt_rq);
  2857. spin_unlock(&smt_rq->lock);
  2858. }
  2859. }
/*
 * Number of 'lost' timeslices this task won't be able to fully
 * utilize if another task runs on a sibling. This models the
 * slowdown effect of other tasks running on siblings:
 */
  2865. static inline unsigned long
  2866. smt_slice(struct task_struct *p, struct sched_domain *sd)
  2867. {
  2868. return p->time_slice * (100 - sd->per_cpu_gain) / 100;
  2869. }
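/*
 * An illustrative example, assuming the common SMT default of
 * sd->per_cpu_gain == 25: a sibling task holding a 100-tick timeslice is
 * treated as effectively having
 *
 *   100 * (100 - 25) / 100 = 75 ticks
 *
 * of usable slice; dependent_sleeper() below compares that value against
 * the timeslice of the task trying to run on this CPU.
 */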
/*
 * To minimise lock contention and avoid having to drop this_rq's runqueue
 * lock, we only trylock the sibling runqueues and bypass those runqueues if
 * we fail to acquire their lock. As we only trylock, the normal locking
 * order does not need to be obeyed.
 */
  2876. static int
  2877. dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
  2878. {
  2879. struct sched_domain *tmp, *sd = NULL;
  2880. int ret = 0, i;
  2881. /* kernel/rt threads do not participate in dependent sleeping */
  2882. if (!p->mm || rt_task(p))
  2883. return 0;
  2884. for_each_domain(this_cpu, tmp) {
  2885. if (tmp->flags & SD_SHARE_CPUPOWER) {
  2886. sd = tmp;
  2887. break;
  2888. }
  2889. }
  2890. if (!sd)
  2891. return 0;
  2892. for_each_cpu_mask(i, sd->span) {
  2893. struct task_struct *smt_curr;
  2894. struct rq *smt_rq;
  2895. if (i == this_cpu)
  2896. continue;
  2897. smt_rq = cpu_rq(i);
  2898. if (unlikely(!spin_trylock(&smt_rq->lock)))
  2899. continue;
  2900. smt_curr = smt_rq->curr;
  2901. if (!smt_curr->mm)
  2902. goto unlock;
  2903. /*
  2904. * If a user task with lower static priority than the
  2905. * running task on the SMT sibling is trying to schedule,
  2906. * delay it till there is proportionately less timeslice
  2907. * left of the sibling task to prevent a lower priority
  2908. * task from using an unfair proportion of the
  2909. * physical cpu's resources. -ck
  2910. */
  2911. if (rt_task(smt_curr)) {
  2912. /*
  2913. * With real time tasks we run non-rt tasks only
  2914. * per_cpu_gain% of the time.
  2915. */
  2916. if ((jiffies % DEF_TIMESLICE) >
  2917. (sd->per_cpu_gain * DEF_TIMESLICE / 100))
  2918. ret = 1;
  2919. } else {
  2920. if (smt_curr->static_prio < p->static_prio &&
  2921. !TASK_PREEMPTS_CURR(p, smt_rq) &&
  2922. smt_slice(smt_curr, sd) > task_timeslice(p))
  2923. ret = 1;
  2924. }
  2925. unlock:
  2926. spin_unlock(&smt_rq->lock);
  2927. }
  2928. return ret;
  2929. }
  2930. #else
  2931. static inline void wake_sleeping_dependent(int this_cpu)
  2932. {
  2933. }
  2934. static inline int
  2935. dependent_sleeper(int this_cpu, struct rq *this_rq, struct task_struct *p)
  2936. {
  2937. return 0;
  2938. }
  2939. #endif
  2940. #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
  2941. void fastcall add_preempt_count(int val)
  2942. {
  2943. /*
  2944. * Underflow?
  2945. */
  2946. if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
  2947. return;
  2948. preempt_count() += val;
  2949. /*
  2950. * Spinlock count overflowing soon?
  2951. */
  2952. DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
  2953. }
  2954. EXPORT_SYMBOL(add_preempt_count);
  2955. void fastcall sub_preempt_count(int val)
  2956. {
  2957. /*
  2958. * Underflow?
  2959. */
  2960. if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
  2961. return;
  2962. /*
  2963. * Is the spinlock portion underflowing?
  2964. */
  2965. if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
  2966. !(preempt_count() & PREEMPT_MASK)))
  2967. return;
  2968. preempt_count() -= val;
  2969. }
  2970. EXPORT_SYMBOL(sub_preempt_count);
  2971. #endif
  2972. static inline int interactive_sleep(enum sleep_type sleep_type)
  2973. {
  2974. return (sleep_type == SLEEP_INTERACTIVE ||
  2975. sleep_type == SLEEP_INTERRUPTED);
  2976. }
  2977. /*
  2978. * schedule() is the main scheduler function.
  2979. */
  2980. asmlinkage void __sched schedule(void)
  2981. {
  2982. struct task_struct *prev, *next;
  2983. struct prio_array *array;
  2984. struct list_head *queue;
  2985. unsigned long long now;
  2986. unsigned long run_time;
  2987. int cpu, idx, new_prio;
  2988. long *switch_count;
  2989. struct rq *rq;
  2990. /*
  2991. * Test if we are atomic. Since do_exit() needs to call into
  2992. * schedule() atomically, we ignore that path for now.
  2993. * Otherwise, whine if we are scheduling when we should not be.
  2994. */
  2995. if (unlikely(in_atomic() && !current->exit_state)) {
  2996. printk(KERN_ERR "BUG: scheduling while atomic: "
  2997. "%s/0x%08x/%d\n",
  2998. current->comm, preempt_count(), current->pid);
  2999. debug_show_held_locks(current);
  3000. dump_stack();
  3001. }
  3002. profile_hit(SCHED_PROFILING, __builtin_return_address(0));
  3003. need_resched:
  3004. preempt_disable();
  3005. prev = current;
  3006. release_kernel_lock(prev);
  3007. need_resched_nonpreemptible:
  3008. rq = this_rq();
  3009. /*
  3010. * The idle thread is not allowed to schedule!
  3011. * Remove this check after it has been exercised a bit.
  3012. */
  3013. if (unlikely(prev == rq->idle) && prev->state != TASK_RUNNING) {
  3014. printk(KERN_ERR "bad: scheduling from the idle thread!\n");
  3015. dump_stack();
  3016. }
  3017. schedstat_inc(rq, sched_cnt);
  3018. now = sched_clock();
  3019. if (likely((long long)(now - prev->timestamp) < NS_MAX_SLEEP_AVG)) {
  3020. run_time = now - prev->timestamp;
  3021. if (unlikely((long long)(now - prev->timestamp) < 0))
  3022. run_time = 0;
  3023. } else
  3024. run_time = NS_MAX_SLEEP_AVG;
  3025. /*
 * Tasks are charged proportionately less run_time at high sleep_avg to
 * delay them losing their interactive status
  3028. */
  3029. run_time /= (CURRENT_BONUS(prev) ? : 1);
  3030. spin_lock_irq(&rq->lock);
  3031. switch_count = &prev->nivcsw;
  3032. if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
  3033. switch_count = &prev->nvcsw;
  3034. if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
  3035. unlikely(signal_pending(prev))))
  3036. prev->state = TASK_RUNNING;
  3037. else {
  3038. if (prev->state == TASK_UNINTERRUPTIBLE)
  3039. rq->nr_uninterruptible++;
  3040. deactivate_task(prev, rq);
  3041. }
  3042. }
  3043. cpu = smp_processor_id();
  3044. if (unlikely(!rq->nr_running)) {
  3045. idle_balance(cpu, rq);
  3046. if (!rq->nr_running) {
  3047. next = rq->idle;
  3048. rq->expired_timestamp = 0;
  3049. wake_sleeping_dependent(cpu);
  3050. goto switch_tasks;
  3051. }
  3052. }
  3053. array = rq->active;
  3054. if (unlikely(!array->nr_active)) {
  3055. /*
  3056. * Switch the active and expired arrays.
  3057. */
  3058. schedstat_inc(rq, sched_switch);
  3059. rq->active = rq->expired;
  3060. rq->expired = array;
  3061. array = rq->active;
  3062. rq->expired_timestamp = 0;
  3063. rq->best_expired_prio = MAX_PRIO;
  3064. }
  3065. idx = sched_find_first_bit(array->bitmap);
  3066. queue = array->queue + idx;
  3067. next = list_entry(queue->next, struct task_struct, run_list);
  3068. if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
  3069. unsigned long long delta = now - next->timestamp;
  3070. if (unlikely((long long)(now - next->timestamp) < 0))
  3071. delta = 0;
  3072. if (next->sleep_type == SLEEP_INTERACTIVE)
  3073. delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
  3074. array = next->array;
  3075. new_prio = recalc_task_prio(next, next->timestamp + delta);
  3076. if (unlikely(next->prio != new_prio)) {
  3077. dequeue_task(next, array);
  3078. next->prio = new_prio;
  3079. enqueue_task(next, array);
  3080. }
  3081. }
  3082. next->sleep_type = SLEEP_NORMAL;
  3083. if (dependent_sleeper(cpu, rq, next))
  3084. next = rq->idle;
  3085. switch_tasks:
  3086. if (next == rq->idle)
  3087. schedstat_inc(rq, sched_goidle);
  3088. prefetch(next);
  3089. prefetch_stack(next);
  3090. clear_tsk_need_resched(prev);
  3091. rcu_qsctr_inc(task_cpu(prev));
  3092. update_cpu_clock(prev, rq, now);
  3093. prev->sleep_avg -= run_time;
  3094. if ((long)prev->sleep_avg <= 0)
  3095. prev->sleep_avg = 0;
  3096. prev->timestamp = prev->last_ran = now;
  3097. sched_info_switch(prev, next);
  3098. if (likely(prev != next)) {
  3099. next->timestamp = now;
  3100. rq->nr_switches++;
  3101. rq->curr = next;
  3102. ++*switch_count;
  3103. prepare_task_switch(rq, next);
  3104. prev = context_switch(rq, prev, next);
  3105. barrier();
  3106. /*
  3107. * this_rq must be evaluated again because prev may have moved
  3108. * CPUs since it called schedule(), thus the 'rq' on its stack
  3109. * frame will be invalid.
  3110. */
  3111. finish_task_switch(this_rq(), prev);
  3112. } else
  3113. spin_unlock_irq(&rq->lock);
  3114. prev = current;
  3115. if (unlikely(reacquire_kernel_lock(prev) < 0))
  3116. goto need_resched_nonpreemptible;
  3117. preempt_enable_no_resched();
  3118. if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
  3119. goto need_resched;
  3120. }
  3121. EXPORT_SYMBOL(schedule);
  3122. #ifdef CONFIG_PREEMPT
/*
 * This is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable(). Kernel preemption off the return-from-interrupt
 * path is handled separately (see preempt_schedule_irq() below).
 */
  3128. asmlinkage void __sched preempt_schedule(void)
  3129. {
  3130. struct thread_info *ti = current_thread_info();
  3131. #ifdef CONFIG_PREEMPT_BKL
  3132. struct task_struct *task = current;
  3133. int saved_lock_depth;
  3134. #endif
  3135. /*
  3136. * If there is a non-zero preempt_count or interrupts are disabled,
  3137. * we do not want to preempt the current task. Just return..
  3138. */
  3139. if (likely(ti->preempt_count || irqs_disabled()))
  3140. return;
  3141. need_resched:
  3142. add_preempt_count(PREEMPT_ACTIVE);
/*
 * We keep the big kernel semaphore locked, but we
 * clear ->lock_depth so that schedule() doesn't
 * auto-release the semaphore:
 */
  3148. #ifdef CONFIG_PREEMPT_BKL
  3149. saved_lock_depth = task->lock_depth;
  3150. task->lock_depth = -1;
  3151. #endif
  3152. schedule();
  3153. #ifdef CONFIG_PREEMPT_BKL
  3154. task->lock_depth = saved_lock_depth;
  3155. #endif
  3156. sub_preempt_count(PREEMPT_ACTIVE);
  3157. /* we could miss a preemption opportunity between schedule and now */
  3158. barrier();
  3159. if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
  3160. goto need_resched;
  3161. }
  3162. EXPORT_SYMBOL(preempt_schedule);
/*
 * This is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note that this is called and returns with irqs disabled. This
 * protects us against recursive calls from irq context.
 */
  3169. asmlinkage void __sched preempt_schedule_irq(void)
  3170. {
  3171. struct thread_info *ti = current_thread_info();
  3172. #ifdef CONFIG_PREEMPT_BKL
  3173. struct task_struct *task = current;
  3174. int saved_lock_depth;
  3175. #endif
  3176. /* Catch callers which need to be fixed */
  3177. BUG_ON(ti->preempt_count || !irqs_disabled());
  3178. need_resched:
  3179. add_preempt_count(PREEMPT_ACTIVE);
/*
 * We keep the big kernel semaphore locked, but we
 * clear ->lock_depth so that schedule() doesn't
 * auto-release the semaphore:
 */
  3185. #ifdef CONFIG_PREEMPT_BKL
  3186. saved_lock_depth = task->lock_depth;
  3187. task->lock_depth = -1;
  3188. #endif
  3189. local_irq_enable();
  3190. schedule();
  3191. local_irq_disable();
  3192. #ifdef CONFIG_PREEMPT_BKL
  3193. task->lock_depth = saved_lock_depth;
  3194. #endif
  3195. sub_preempt_count(PREEMPT_ACTIVE);
  3196. /* we could miss a preemption opportunity between schedule and now */
  3197. barrier();
  3198. if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
  3199. goto need_resched;
  3200. }
  3201. #endif /* CONFIG_PREEMPT */
  3202. int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
  3203. void *key)
  3204. {
  3205. return try_to_wake_up(curr->private, mode, sync);
  3206. }
  3207. EXPORT_SYMBOL(default_wake_function);
  3208. /*
  3209. * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
  3210. * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
  3211. * number) then we wake all the non-exclusive tasks and one exclusive task.
  3212. *
  3213. * There are circumstances in which we can try to wake a task which has already
  3214. * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  3215. * zero in this (rare) case, and we handle it by continuing to scan the queue.
  3216. */
  3217. static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  3218. int nr_exclusive, int sync, void *key)
  3219. {
  3220. struct list_head *tmp, *next;
  3221. list_for_each_safe(tmp, next, &q->task_list) {
  3222. wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
  3223. unsigned flags = curr->flags;
  3224. if (curr->func(curr, mode, sync, key) &&
  3225. (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
  3226. break;
  3227. }
  3228. }
  3229. /**
  3230. * __wake_up - wake up threads blocked on a waitqueue.
  3231. * @q: the waitqueue
  3232. * @mode: which threads
  3233. * @nr_exclusive: how many wake-one or wake-many threads to wake up
  3234. * @key: is directly passed to the wakeup function
  3235. */
  3236. void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
  3237. int nr_exclusive, void *key)
  3238. {
  3239. unsigned long flags;
  3240. spin_lock_irqsave(&q->lock, flags);
  3241. __wake_up_common(q, mode, nr_exclusive, 0, key);
  3242. spin_unlock_irqrestore(&q->lock, flags);
  3243. }
  3244. EXPORT_SYMBOL(__wake_up);
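/*
 * An illustrative usage sketch (the names my_wq and 'ready' are
 * hypothetical) of the usual driver pattern built on these primitives:
 *
 *   static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *   static int ready;
 *
 *   waiter (e.g. a read() handler):
 *       wait_event_interruptible(my_wq, ready);
 *
 *   producer (e.g. an interrupt handler):
 *       ready = 1;
 *       wake_up(&my_wq);        ends up in __wake_up() above
 *
 * wake_up() wakes all non-exclusive waiters plus one exclusive waiter
 * (nr_exclusive == 1); wake_up_all() passes nr_exclusive == 0.
 */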
  3245. /*
  3246. * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  3247. */
  3248. void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  3249. {
  3250. __wake_up_common(q, mode, 1, 0, NULL);
  3251. }
  3252. /**
  3253. * __wake_up_sync - wake up threads blocked on a waitqueue.
  3254. * @q: the waitqueue
  3255. * @mode: which threads
  3256. * @nr_exclusive: how many wake-one or wake-many threads to wake up
  3257. *
 * The sync wakeup differs in that the waker knows that it will schedule
 * away soon, so while the target thread will be woken up, it will not
 * be migrated to another CPU - i.e. the two threads are 'synchronized'
  3261. * with each other. This can prevent needless bouncing between CPUs.
  3262. *
  3263. * On UP it can prevent extra preemption.
  3264. */
  3265. void fastcall
  3266. __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
  3267. {
  3268. unsigned long flags;
  3269. int sync = 1;
  3270. if (unlikely(!q))
  3271. return;
  3272. if (unlikely(!nr_exclusive))
  3273. sync = 0;
  3274. spin_lock_irqsave(&q->lock, flags);
  3275. __wake_up_common(q, mode, nr_exclusive, sync, NULL);
  3276. spin_unlock_irqrestore(&q->lock, flags);
  3277. }
  3278. EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
  3279. void fastcall complete(struct completion *x)
  3280. {
  3281. unsigned long flags;
  3282. spin_lock_irqsave(&x->wait.lock, flags);
  3283. x->done++;
  3284. __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
  3285. 1, 0, NULL);
  3286. spin_unlock_irqrestore(&x->wait.lock, flags);
  3287. }
  3288. EXPORT_SYMBOL(complete);
  3289. void fastcall complete_all(struct completion *x)
  3290. {
  3291. unsigned long flags;
  3292. spin_lock_irqsave(&x->wait.lock, flags);
  3293. x->done += UINT_MAX/2;
  3294. __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
  3295. 0, 0, NULL);
  3296. spin_unlock_irqrestore(&x->wait.lock, flags);
  3297. }
  3298. EXPORT_SYMBOL(complete_all);
  3299. void fastcall __sched wait_for_completion(struct completion *x)
  3300. {
  3301. might_sleep();
  3302. spin_lock_irq(&x->wait.lock);
  3303. if (!x->done) {
  3304. DECLARE_WAITQUEUE(wait, current);
  3305. wait.flags |= WQ_FLAG_EXCLUSIVE;
  3306. __add_wait_queue_tail(&x->wait, &wait);
  3307. do {
  3308. __set_current_state(TASK_UNINTERRUPTIBLE);
  3309. spin_unlock_irq(&x->wait.lock);
  3310. schedule();
  3311. spin_lock_irq(&x->wait.lock);
  3312. } while (!x->done);
  3313. __remove_wait_queue(&x->wait, &wait);
  3314. }
  3315. x->done--;
  3316. spin_unlock_irq(&x->wait.lock);
  3317. }
  3318. EXPORT_SYMBOL(wait_for_completion);
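/*
 * An illustrative usage sketch: the typical handshake between a submitter
 * and an asynchronous worker. start_my_work() is hypothetical; the
 * completion calls are the real API.
 *
 *   struct completion my_done;
 *
 *   init_completion(&my_done);
 *   start_my_work(&my_done);          hand the completion to the worker
 *   wait_for_completion(&my_done);    sleeps in TASK_UNINTERRUPTIBLE
 *
 *   and in the worker, once it has finished:
 *
 *   complete(&my_done);
 *
 * DECLARE_COMPLETION() can be used instead of init_completion() for
 * statically allocated completions.
 */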
  3319. unsigned long fastcall __sched
  3320. wait_for_completion_timeout(struct completion *x, unsigned long timeout)
  3321. {
  3322. might_sleep();
  3323. spin_lock_irq(&x->wait.lock);
  3324. if (!x->done) {
  3325. DECLARE_WAITQUEUE(wait, current);
  3326. wait.flags |= WQ_FLAG_EXCLUSIVE;
  3327. __add_wait_queue_tail(&x->wait, &wait);
  3328. do {
  3329. __set_current_state(TASK_UNINTERRUPTIBLE);
  3330. spin_unlock_irq(&x->wait.lock);
  3331. timeout = schedule_timeout(timeout);
  3332. spin_lock_irq(&x->wait.lock);
  3333. if (!timeout) {
  3334. __remove_wait_queue(&x->wait, &wait);
  3335. goto out;
  3336. }
  3337. } while (!x->done);
  3338. __remove_wait_queue(&x->wait, &wait);
  3339. }
  3340. x->done--;
  3341. out:
  3342. spin_unlock_irq(&x->wait.lock);
  3343. return timeout;
  3344. }
  3345. EXPORT_SYMBOL(wait_for_completion_timeout);
  3346. int fastcall __sched wait_for_completion_interruptible(struct completion *x)
  3347. {
  3348. int ret = 0;
  3349. might_sleep();
  3350. spin_lock_irq(&x->wait.lock);
  3351. if (!x->done) {
  3352. DECLARE_WAITQUEUE(wait, current);
  3353. wait.flags |= WQ_FLAG_EXCLUSIVE;
  3354. __add_wait_queue_tail(&x->wait, &wait);
  3355. do {
  3356. if (signal_pending(current)) {
  3357. ret = -ERESTARTSYS;
  3358. __remove_wait_queue(&x->wait, &wait);
  3359. goto out;
  3360. }
  3361. __set_current_state(TASK_INTERRUPTIBLE);
  3362. spin_unlock_irq(&x->wait.lock);
  3363. schedule();
  3364. spin_lock_irq(&x->wait.lock);
  3365. } while (!x->done);
  3366. __remove_wait_queue(&x->wait, &wait);
  3367. }
  3368. x->done--;
  3369. out:
  3370. spin_unlock_irq(&x->wait.lock);
  3371. return ret;
  3372. }
  3373. EXPORT_SYMBOL(wait_for_completion_interruptible);
  3374. unsigned long fastcall __sched
  3375. wait_for_completion_interruptible_timeout(struct completion *x,
  3376. unsigned long timeout)
  3377. {
  3378. might_sleep();
  3379. spin_lock_irq(&x->wait.lock);
  3380. if (!x->done) {
  3381. DECLARE_WAITQUEUE(wait, current);
  3382. wait.flags |= WQ_FLAG_EXCLUSIVE;
  3383. __add_wait_queue_tail(&x->wait, &wait);
  3384. do {
  3385. if (signal_pending(current)) {
  3386. timeout = -ERESTARTSYS;
  3387. __remove_wait_queue(&x->wait, &wait);
  3388. goto out;
  3389. }
  3390. __set_current_state(TASK_INTERRUPTIBLE);
  3391. spin_unlock_irq(&x->wait.lock);
  3392. timeout = schedule_timeout(timeout);
  3393. spin_lock_irq(&x->wait.lock);
  3394. if (!timeout) {
  3395. __remove_wait_queue(&x->wait, &wait);
  3396. goto out;
  3397. }
  3398. } while (!x->done);
  3399. __remove_wait_queue(&x->wait, &wait);
  3400. }
  3401. x->done--;
  3402. out:
  3403. spin_unlock_irq(&x->wait.lock);
  3404. return timeout;
  3405. }
  3406. EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  3407. #define SLEEP_ON_VAR \
  3408. unsigned long flags; \
  3409. wait_queue_t wait; \
  3410. init_waitqueue_entry(&wait, current);
  3411. #define SLEEP_ON_HEAD \
  3412. spin_lock_irqsave(&q->lock,flags); \
  3413. __add_wait_queue(q, &wait); \
  3414. spin_unlock(&q->lock);
  3415. #define SLEEP_ON_TAIL \
  3416. spin_lock_irq(&q->lock); \
  3417. __remove_wait_queue(q, &wait); \
  3418. spin_unlock_irqrestore(&q->lock, flags);
  3419. void fastcall __sched interruptible_sleep_on(wait_queue_head_t *q)
  3420. {
  3421. SLEEP_ON_VAR
  3422. current->state = TASK_INTERRUPTIBLE;
  3423. SLEEP_ON_HEAD
  3424. schedule();
  3425. SLEEP_ON_TAIL
  3426. }
  3427. EXPORT_SYMBOL(interruptible_sleep_on);
  3428. long fastcall __sched
  3429. interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
  3430. {
  3431. SLEEP_ON_VAR
  3432. current->state = TASK_INTERRUPTIBLE;
  3433. SLEEP_ON_HEAD
  3434. timeout = schedule_timeout(timeout);
  3435. SLEEP_ON_TAIL
  3436. return timeout;
  3437. }
  3438. EXPORT_SYMBOL(interruptible_sleep_on_timeout);
  3439. void fastcall __sched sleep_on(wait_queue_head_t *q)
  3440. {
  3441. SLEEP_ON_VAR
  3442. current->state = TASK_UNINTERRUPTIBLE;
  3443. SLEEP_ON_HEAD
  3444. schedule();
  3445. SLEEP_ON_TAIL
  3446. }
  3447. EXPORT_SYMBOL(sleep_on);
  3448. long fastcall __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
  3449. {
  3450. SLEEP_ON_VAR
  3451. current->state = TASK_UNINTERRUPTIBLE;
  3452. SLEEP_ON_HEAD
  3453. timeout = schedule_timeout(timeout);
  3454. SLEEP_ON_TAIL
  3455. return timeout;
  3456. }
  3457. EXPORT_SYMBOL(sleep_on_timeout);
  3458. #ifdef CONFIG_RT_MUTEXES
  3459. /*
  3460. * rt_mutex_setprio - set the current priority of a task
  3461. * @p: task
  3462. * @prio: prio value (kernel-internal form)
  3463. *
  3464. * This function changes the 'effective' priority of a task. It does
  3465. * not touch ->normal_prio like __setscheduler().
  3466. *
  3467. * Used by the rt_mutex code to implement priority inheritance logic.
  3468. */
  3469. void rt_mutex_setprio(struct task_struct *p, int prio)
  3470. {
  3471. struct prio_array *array;
  3472. unsigned long flags;
  3473. struct rq *rq;
  3474. int oldprio;
  3475. BUG_ON(prio < 0 || prio > MAX_PRIO);
  3476. rq = task_rq_lock(p, &flags);
  3477. oldprio = p->prio;
  3478. array = p->array;
  3479. if (array)
  3480. dequeue_task(p, array);
  3481. p->prio = prio;
  3482. if (array) {
  3483. /*
  3484. * If changing to an RT priority then queue it
  3485. * in the active array!
  3486. */
  3487. if (rt_task(p))
  3488. array = rq->active;
  3489. enqueue_task(p, array);
  3490. /*
  3491. * Reschedule if we are currently running on this runqueue and
  3492. * our priority decreased, or if we are not currently running on
  3493. * this runqueue and our priority is higher than the current's
  3494. */
  3495. if (task_running(rq, p)) {
  3496. if (p->prio > oldprio)
  3497. resched_task(rq->curr);
  3498. } else if (TASK_PREEMPTS_CURR(p, rq))
  3499. resched_task(rq->curr);
  3500. }
  3501. task_rq_unlock(rq, &flags);
  3502. }
  3503. #endif
  3504. void set_user_nice(struct task_struct *p, long nice)
  3505. {
  3506. struct prio_array *array;
  3507. int old_prio, delta;
  3508. unsigned long flags;
  3509. struct rq *rq;
  3510. if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
  3511. return;
  3512. /*
  3513. * We have to be careful, if called from sys_setpriority(),
  3514. * the task might be in the middle of scheduling on another CPU.
  3515. */
  3516. rq = task_rq_lock(p, &flags);
/*
 * The RT priorities are set via sched_setscheduler(), but we still
 * allow the 'normal' nice value to be set - but as expected
 * it won't have any effect on scheduling until the task returns to
 * SCHED_NORMAL/SCHED_BATCH:
 */
  3523. if (has_rt_policy(p)) {
  3524. p->static_prio = NICE_TO_PRIO(nice);
  3525. goto out_unlock;
  3526. }
  3527. array = p->array;
  3528. if (array) {
  3529. dequeue_task(p, array);
  3530. dec_raw_weighted_load(rq, p);
  3531. }
  3532. p->static_prio = NICE_TO_PRIO(nice);
  3533. set_load_weight(p);
  3534. old_prio = p->prio;
  3535. p->prio = effective_prio(p);
  3536. delta = p->prio - old_prio;
  3537. if (array) {
  3538. enqueue_task(p, array);
  3539. inc_raw_weighted_load(rq, p);
  3540. /*
  3541. * If the task increased its priority or is running and
  3542. * lowered its priority, then reschedule its CPU:
  3543. */
  3544. if (delta < 0 || (delta > 0 && task_running(rq, p)))
  3545. resched_task(rq->curr);
  3546. }
  3547. out_unlock:
  3548. task_rq_unlock(rq, &flags);
  3549. }
  3550. EXPORT_SYMBOL(set_user_nice);
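/*
 * An illustrative usage sketch: in-kernel callers typically apply this to
 * their own kernel threads, e.g.
 *
 *   set_user_nice(current, 19);     background worker, lowest priority
 *   set_user_nice(current, -5);     latency-sensitive helper thread
 *
 * User space reaches the same code via nice(2) and setpriority(2).
 */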
  3551. /*
  3552. * can_nice - check if a task can reduce its nice value
  3553. * @p: task
  3554. * @nice: nice value
  3555. */
  3556. int can_nice(const struct task_struct *p, const int nice)
  3557. {
  3558. /* convert nice value [19,-20] to rlimit style value [1,40] */
  3559. int nice_rlim = 20 - nice;
  3560. return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
  3561. capable(CAP_SYS_NICE));
  3562. }
  3563. #ifdef __ARCH_WANT_SYS_NICE
  3564. /*
  3565. * sys_nice - change the priority of the current process.
  3566. * @increment: priority increment
  3567. *
  3568. * sys_setpriority is a more generic, but much slower function that
  3569. * does similar things.
  3570. */
  3571. asmlinkage long sys_nice(int increment)
  3572. {
  3573. long nice, retval;
  3574. /*
  3575. * Setpriority might change our priority at the same moment.
  3576. * We don't have to worry. Conceptually one call occurs first
  3577. * and we have a single winner.
  3578. */
  3579. if (increment < -40)
  3580. increment = -40;
  3581. if (increment > 40)
  3582. increment = 40;
  3583. nice = PRIO_TO_NICE(current->static_prio) + increment;
  3584. if (nice < -20)
  3585. nice = -20;
  3586. if (nice > 19)
  3587. nice = 19;
  3588. if (increment < 0 && !can_nice(current, nice))
  3589. return -EPERM;
  3590. retval = security_task_setnice(current, nice);
  3591. if (retval)
  3592. return retval;
  3593. set_user_nice(current, nice);
  3594. return 0;
  3595. }
  3596. #endif
  3597. /**
  3598. * task_prio - return the priority value of a given task.
  3599. * @p: the task in question.
  3600. *
  3601. * This is the priority value as seen by users in /proc.
  3602. * RT tasks are offset by -200. Normal tasks are centered
  3603. * around 0, value goes from -16 to +15.
  3604. */
  3605. int task_prio(const struct task_struct *p)
  3606. {
  3607. return p->prio - MAX_RT_PRIO;
  3608. }
  3609. /**
  3610. * task_nice - return the nice value of a given task.
  3611. * @p: the task in question.
  3612. */
  3613. int task_nice(const struct task_struct *p)
  3614. {
  3615. return TASK_NICE(p);
  3616. }
  3617. EXPORT_SYMBOL_GPL(task_nice);
  3618. /**
  3619. * idle_cpu - is a given cpu idle currently?
  3620. * @cpu: the processor in question.
  3621. */
  3622. int idle_cpu(int cpu)
  3623. {
  3624. return cpu_curr(cpu) == cpu_rq(cpu)->idle;
  3625. }
  3626. /**
  3627. * idle_task - return the idle task for a given cpu.
  3628. * @cpu: the processor in question.
  3629. */
  3630. struct task_struct *idle_task(int cpu)
  3631. {
  3632. return cpu_rq(cpu)->idle;
  3633. }
  3634. /**
  3635. * find_process_by_pid - find a process with a matching PID value.
  3636. * @pid: the pid in question.
  3637. */
  3638. static inline struct task_struct *find_process_by_pid(pid_t pid)
  3639. {
  3640. return pid ? find_task_by_pid(pid) : current;
  3641. }
  3642. /* Actually do priority change: must hold rq lock. */
  3643. static void __setscheduler(struct task_struct *p, int policy, int prio)
  3644. {
  3645. BUG_ON(p->array);
  3646. p->policy = policy;
  3647. p->rt_priority = prio;
  3648. p->normal_prio = normal_prio(p);
  3649. /* we are holding p->pi_lock already */
  3650. p->prio = rt_mutex_getprio(p);
  3651. /*
  3652. * SCHED_BATCH tasks are treated as perpetual CPU hogs:
  3653. */
  3654. if (policy == SCHED_BATCH)
  3655. p->sleep_avg = 0;
  3656. set_load_weight(p);
  3657. }
  3658. /**
  3659. * sched_setscheduler - change the scheduling policy and/or RT priority of
  3660. * a thread.
  3661. * @p: the task in question.
  3662. * @policy: new policy.
  3663. * @param: structure containing the new RT priority.
  3664. *
 * NOTE: the task may already be dead
  3666. */
  3667. int sched_setscheduler(struct task_struct *p, int policy,
  3668. struct sched_param *param)
  3669. {
  3670. int retval, oldprio, oldpolicy = -1;
  3671. struct prio_array *array;
  3672. unsigned long flags;
  3673. struct rq *rq;
  3674. /* may grab non-irq protected spin_locks */
  3675. BUG_ON(in_interrupt());
  3676. recheck:
  3677. /* double check policy once rq lock held */
  3678. if (policy < 0)
  3679. policy = oldpolicy = p->policy;
  3680. else if (policy != SCHED_FIFO && policy != SCHED_RR &&
  3681. policy != SCHED_NORMAL && policy != SCHED_BATCH)
  3682. return -EINVAL;
  3683. /*
  3684. * Valid priorities for SCHED_FIFO and SCHED_RR are
  3685. * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and
  3686. * SCHED_BATCH is 0.
  3687. */
  3688. if (param->sched_priority < 0 ||
  3689. (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
  3690. (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
  3691. return -EINVAL;
  3692. if (is_rt_policy(policy) != (param->sched_priority != 0))
  3693. return -EINVAL;
  3694. /*
  3695. * Allow unprivileged RT tasks to decrease priority:
  3696. */
  3697. if (!capable(CAP_SYS_NICE)) {
  3698. if (is_rt_policy(policy)) {
  3699. unsigned long rlim_rtprio;
  3700. unsigned long flags;
  3701. if (!lock_task_sighand(p, &flags))
  3702. return -ESRCH;
  3703. rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
  3704. unlock_task_sighand(p, &flags);
  3705. /* can't set/change the rt policy */
  3706. if (policy != p->policy && !rlim_rtprio)
  3707. return -EPERM;
  3708. /* can't increase priority */
  3709. if (param->sched_priority > p->rt_priority &&
  3710. param->sched_priority > rlim_rtprio)
  3711. return -EPERM;
  3712. }
  3713. /* can't change other user's priorities */
  3714. if ((current->euid != p->euid) &&
  3715. (current->euid != p->uid))
  3716. return -EPERM;
  3717. }
  3718. retval = security_task_setscheduler(p, policy, param);
  3719. if (retval)
  3720. return retval;
  3721. /*
  3722. * make sure no PI-waiters arrive (or leave) while we are
  3723. * changing the priority of the task:
  3724. */
  3725. spin_lock_irqsave(&p->pi_lock, flags);
  3726. /*
 * To be able to change p->policy safely, the appropriate
  3728. * runqueue lock must be held.
  3729. */
  3730. rq = __task_rq_lock(p);
  3731. /* recheck policy now with rq lock held */
  3732. if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
  3733. policy = oldpolicy = -1;
  3734. __task_rq_unlock(rq);
  3735. spin_unlock_irqrestore(&p->pi_lock, flags);
  3736. goto recheck;
  3737. }
  3738. array = p->array;
  3739. if (array)
  3740. deactivate_task(p, rq);
  3741. oldprio = p->prio;
  3742. __setscheduler(p, policy, param->sched_priority);
  3743. if (array) {
  3744. __activate_task(p, rq);
  3745. /*
  3746. * Reschedule if we are currently running on this runqueue and
  3747. * our priority decreased, or if we are not currently running on
  3748. * this runqueue and our priority is higher than the current's
  3749. */
  3750. if (task_running(rq, p)) {
  3751. if (p->prio > oldprio)
  3752. resched_task(rq->curr);
  3753. } else if (TASK_PREEMPTS_CURR(p, rq))
  3754. resched_task(rq->curr);
  3755. }
  3756. __task_rq_unlock(rq);
  3757. spin_unlock_irqrestore(&p->pi_lock, flags);
  3758. rt_mutex_adjust_pi(p);
  3759. return 0;
  3760. }
  3761. EXPORT_SYMBOL_GPL(sched_setscheduler);
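/*
 * An illustrative usage sketch for in-kernel callers: switch the current
 * kernel thread to round-robin real-time scheduling. The priority 1 is
 * arbitrary; valid RT priorities for user tasks are 1..MAX_USER_RT_PRIO-1,
 * as checked above.
 *
 *   struct sched_param param = { .sched_priority = 1 };
 *
 *   if (sched_setscheduler(current, SCHED_RR, &param))
 *           printk(KERN_WARNING "could not switch to SCHED_RR\n");
 *
 * Passing SCHED_NORMAL with sched_priority == 0 returns the task to the
 * normal time-sharing policy.
 */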
  3762. static int
  3763. do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  3764. {
  3765. struct sched_param lparam;
  3766. struct task_struct *p;
  3767. int retval;
  3768. if (!param || pid < 0)
  3769. return -EINVAL;
  3770. if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
  3771. return -EFAULT;
  3772. rcu_read_lock();
  3773. retval = -ESRCH;
  3774. p = find_process_by_pid(pid);
  3775. if (p != NULL)
  3776. retval = sched_setscheduler(p, policy, &lparam);
  3777. rcu_read_unlock();
  3778. return retval;
  3779. }
  3780. /**
  3781. * sys_sched_setscheduler - set/change the scheduler policy and RT priority
  3782. * @pid: the pid in question.
  3783. * @policy: new policy.
  3784. * @param: structure containing the new RT priority.
  3785. */
  3786. asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
  3787. struct sched_param __user *param)
  3788. {
  3789. /* negative values for policy are not valid */
  3790. if (policy < 0)
  3791. return -EINVAL;
  3792. return do_sched_setscheduler(pid, policy, param);
  3793. }
  3794. /**
  3795. * sys_sched_setparam - set/change the RT priority of a thread
  3796. * @pid: the pid in question.
  3797. * @param: structure containing the new RT priority.
  3798. */
  3799. asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
  3800. {
  3801. return do_sched_setscheduler(pid, -1, param);
  3802. }
  3803. /**
  3804. * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  3805. * @pid: the pid in question.
  3806. */
  3807. asmlinkage long sys_sched_getscheduler(pid_t pid)
  3808. {
  3809. struct task_struct *p;
  3810. int retval = -EINVAL;
  3811. if (pid < 0)
  3812. goto out_nounlock;
  3813. retval = -ESRCH;
  3814. read_lock(&tasklist_lock);
  3815. p = find_process_by_pid(pid);
  3816. if (p) {
  3817. retval = security_task_getscheduler(p);
  3818. if (!retval)
  3819. retval = p->policy;
  3820. }
  3821. read_unlock(&tasklist_lock);
  3822. out_nounlock:
  3823. return retval;
  3824. }
  3825. /**
 * sys_sched_getparam - get the RT priority of a thread
  3827. * @pid: the pid in question.
  3828. * @param: structure containing the RT priority.
  3829. */
  3830. asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
  3831. {
  3832. struct sched_param lp;
  3833. struct task_struct *p;
  3834. int retval = -EINVAL;
  3835. if (!param || pid < 0)
  3836. goto out_nounlock;
  3837. read_lock(&tasklist_lock);
  3838. p = find_process_by_pid(pid);
  3839. retval = -ESRCH;
  3840. if (!p)
  3841. goto out_unlock;
  3842. retval = security_task_getscheduler(p);
  3843. if (retval)
  3844. goto out_unlock;
  3845. lp.sched_priority = p->rt_priority;
  3846. read_unlock(&tasklist_lock);
  3847. /*
 * This one might sleep; we cannot do it with a spinlock held ...
  3849. */
  3850. retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
  3851. out_nounlock:
  3852. return retval;
  3853. out_unlock:
  3854. read_unlock(&tasklist_lock);
  3855. return retval;
  3856. }
  3857. long sched_setaffinity(pid_t pid, cpumask_t new_mask)
  3858. {
  3859. cpumask_t cpus_allowed;
  3860. struct task_struct *p;
  3861. int retval;
  3862. lock_cpu_hotplug();
  3863. read_lock(&tasklist_lock);
  3864. p = find_process_by_pid(pid);
  3865. if (!p) {
  3866. read_unlock(&tasklist_lock);
  3867. unlock_cpu_hotplug();
  3868. return -ESRCH;
  3869. }
  3870. /*
  3871. * It is not safe to call set_cpus_allowed with the
  3872. * tasklist_lock held. We will bump the task_struct's
  3873. * usage count and then drop tasklist_lock.
  3874. */
  3875. get_task_struct(p);
  3876. read_unlock(&tasklist_lock);
  3877. retval = -EPERM;
  3878. if ((current->euid != p->euid) && (current->euid != p->uid) &&
  3879. !capable(CAP_SYS_NICE))
  3880. goto out_unlock;
  3881. retval = security_task_setscheduler(p, 0, NULL);
  3882. if (retval)
  3883. goto out_unlock;
  3884. cpus_allowed = cpuset_cpus_allowed(p);
  3885. cpus_and(new_mask, new_mask, cpus_allowed);
  3886. retval = set_cpus_allowed(p, new_mask);
  3887. out_unlock:
  3888. put_task_struct(p);
  3889. unlock_cpu_hotplug();
  3890. return retval;
  3891. }
  3892. static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  3893. cpumask_t *new_mask)
  3894. {
  3895. if (len < sizeof(cpumask_t)) {
  3896. memset(new_mask, 0, sizeof(cpumask_t));
  3897. } else if (len > sizeof(cpumask_t)) {
  3898. len = sizeof(cpumask_t);
  3899. }
  3900. return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
  3901. }
  3902. /**
  3903. * sys_sched_setaffinity - set the cpu affinity of a process
  3904. * @pid: pid of the process
  3905. * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  3906. * @user_mask_ptr: user-space pointer to the new cpu mask
  3907. */
  3908. asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
  3909. unsigned long __user *user_mask_ptr)
  3910. {
  3911. cpumask_t new_mask;
  3912. int retval;
  3913. retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
  3914. if (retval)
  3915. return retval;
  3916. return sched_setaffinity(pid, new_mask);
  3917. }
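/*
 * An illustrative user-space sketch of how this syscall is normally reached
 * through glibc, pinning the calling thread to CPU 0:
 *
 *   #define _GNU_SOURCE
 *   #include <sched.h>
 *
 *   cpu_set_t set;
 *
 *   CPU_ZERO(&set);
 *   CPU_SET(0, &set);
 *   if (sched_setaffinity(0, sizeof(set), &set) == -1)
 *           perror("sched_setaffinity");
 *
 * A pid of 0 means the calling thread; the requested mask is then ANDed
 * with the task's cpuset in sched_setaffinity() above.
 */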
/*
 * Represents all CPUs present in the system.
 * In systems capable of hotplug, this map could dynamically grow
 * as new CPUs are detected in the system via any platform-specific
 * method, such as ACPI, for example.
 */
  3924. cpumask_t cpu_present_map __read_mostly;
  3925. EXPORT_SYMBOL(cpu_present_map);
  3926. #ifndef CONFIG_SMP
  3927. cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
  3928. EXPORT_SYMBOL(cpu_online_map);
  3929. cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
  3930. EXPORT_SYMBOL(cpu_possible_map);
  3931. #endif
  3932. long sched_getaffinity(pid_t pid, cpumask_t *mask)
  3933. {
  3934. struct task_struct *p;
  3935. int retval;
  3936. lock_cpu_hotplug();
  3937. read_lock(&tasklist_lock);
  3938. retval = -ESRCH;
  3939. p = find_process_by_pid(pid);
  3940. if (!p)
  3941. goto out_unlock;
  3942. retval = security_task_getscheduler(p);
  3943. if (retval)
  3944. goto out_unlock;
  3945. cpus_and(*mask, p->cpus_allowed, cpu_online_map);
  3946. out_unlock:
  3947. read_unlock(&tasklist_lock);
  3948. unlock_cpu_hotplug();
  3949. if (retval)
  3950. return retval;
  3951. return 0;
  3952. }
  3953. /**
  3954. * sys_sched_getaffinity - get the cpu affinity of a process
  3955. * @pid: pid of the process
  3956. * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  3957. * @user_mask_ptr: user-space pointer to hold the current cpu mask
  3958. */
  3959. asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
  3960. unsigned long __user *user_mask_ptr)
  3961. {
  3962. int ret;
  3963. cpumask_t mask;
  3964. if (len < sizeof(cpumask_t))
  3965. return -EINVAL;
  3966. ret = sched_getaffinity(pid, &mask);
  3967. if (ret < 0)
  3968. return ret;
  3969. if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
  3970. return -EFAULT;
  3971. return sizeof(cpumask_t);
  3972. }
  3973. /**
  3974. * sys_sched_yield - yield the current processor to other threads.
  3975. *
  3976. * this function yields the current CPU by moving the calling thread
  3977. * to the expired array. If there are no other threads running on this
  3978. * CPU then this function will return.
  3979. */
  3980. asmlinkage long sys_sched_yield(void)
  3981. {
  3982. struct rq *rq = this_rq_lock();
  3983. struct prio_array *array = current->array, *target = rq->expired;
  3984. schedstat_inc(rq, yld_cnt);
  3985. /*
  3986. * We implement yielding by moving the task into the expired
  3987. * queue.
  3988. *
  3989. * (special rule: RT tasks will just roundrobin in the active
  3990. * array.)
  3991. */
  3992. if (rt_task(current))
  3993. target = rq->active;
  3994. if (array->nr_active == 1) {
  3995. schedstat_inc(rq, yld_act_empty);
  3996. if (!rq->expired->nr_active)
  3997. schedstat_inc(rq, yld_both_empty);
  3998. } else if (!rq->expired->nr_active)
  3999. schedstat_inc(rq, yld_exp_empty);
  4000. if (array != target) {
  4001. dequeue_task(current, array);
  4002. enqueue_task(current, target);
  4003. } else
  4004. /*
  4005. * requeue_task is cheaper so perform that if possible.
  4006. */
  4007. requeue_task(current, array);
  4008. /*
  4009. * Since we are going to call schedule() anyway, there's
  4010. * no need to preempt or enable interrupts:
  4011. */
  4012. __release(rq->lock);
  4013. spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  4014. _raw_spin_unlock(&rq->lock);
  4015. preempt_enable_no_resched();
  4016. schedule();
  4017. return 0;
  4018. }
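/*
 * Illustrative userspace sketch (not part of this file): a polite busy-wait
 * that gives up the CPU between polls of a shared flag instead of spinning
 * at full speed. The helper name is an arbitrary example.
 *
 *	#include <sched.h>
 *
 *	static void wait_for_flag(volatile int *flag)
 *	{
 *		while (!*flag)
 *			sched_yield();
 *	}
 */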
  4019. static inline int __resched_legal(int expected_preempt_count)
  4020. {
  4021. if (unlikely(preempt_count() != expected_preempt_count))
  4022. return 0;
  4023. if (unlikely(system_state != SYSTEM_RUNNING))
  4024. return 0;
  4025. return 1;
  4026. }
  4027. static void __cond_resched(void)
  4028. {
  4029. #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
  4030. __might_sleep(__FILE__, __LINE__);
  4031. #endif
  4032. /*
  4033. * The BKS might be reacquired before we have dropped
  4034. * PREEMPT_ACTIVE, which could trigger a second
  4035. * cond_resched() call.
  4036. */
  4037. do {
  4038. add_preempt_count(PREEMPT_ACTIVE);
  4039. schedule();
  4040. sub_preempt_count(PREEMPT_ACTIVE);
  4041. } while (need_resched());
  4042. }
  4043. int __sched cond_resched(void)
  4044. {
  4045. if (need_resched() && __resched_legal(0)) {
  4046. __cond_resched();
  4047. return 1;
  4048. }
  4049. return 0;
  4050. }
  4051. EXPORT_SYMBOL(cond_resched);
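/*
 * Minimal in-kernel usage sketch (assumed caller, not part of this file):
 * a long loop that offers to reschedule on every iteration so it does not
 * hog the CPU on non-preemptible kernels. struct item and process_item()
 * are hypothetical.
 *
 *	static void process_items(struct item *items, int nr)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nr; i++) {
 *			process_item(&items[i]);
 *			cond_resched();
 *		}
 *	}
 */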
  4052. /*
  4053. * cond_resched_lock() - if a reschedule is pending, drop the given lock,
  4054. * call schedule, and on return reacquire the lock.
  4055. *
  4056. * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
  4057. * operations here to prevent schedule() from being called twice (once via
  4058. * spin_unlock(), once by hand).
  4059. */
  4060. int cond_resched_lock(spinlock_t *lock)
  4061. {
  4062. int ret = 0;
  4063. if (need_lockbreak(lock)) {
  4064. spin_unlock(lock);
  4065. cpu_relax();
  4066. ret = 1;
  4067. spin_lock(lock);
  4068. }
  4069. if (need_resched() && __resched_legal(1)) {
  4070. spin_release(&lock->dep_map, 1, _THIS_IP_);
  4071. _raw_spin_unlock(lock);
  4072. preempt_enable_no_resched();
  4073. __cond_resched();
  4074. ret = 1;
  4075. spin_lock(lock);
  4076. }
  4077. return ret;
  4078. }
  4079. EXPORT_SYMBOL(cond_resched_lock);
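/*
 * Minimal usage sketch (assumed caller, not part of this file): walking a
 * long list under a spinlock while letting lock waiters and pending
 * reschedules in. A non-zero return means the lock was dropped, so the
 * walk restarts because the list may have changed. The lock, list and
 * helper names are hypothetical.
 *
 *	spin_lock(&cache_lock);
 * restart:
 *	list_for_each_entry(obj, &cache_list, node) {
 *		if (cond_resched_lock(&cache_lock))
 *			goto restart;
 *		shrink_object(obj);
 *	}
 *	spin_unlock(&cache_lock);
 */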
  4080. int __sched cond_resched_softirq(void)
  4081. {
  4082. BUG_ON(!in_softirq());
  4083. if (need_resched() && __resched_legal(0)) {
  4084. raw_local_irq_disable();
  4085. _local_bh_enable();
  4086. raw_local_irq_enable();
  4087. __cond_resched();
  4088. local_bh_disable();
  4089. return 1;
  4090. }
  4091. return 0;
  4092. }
  4093. EXPORT_SYMBOL(cond_resched_softirq);
  4094. /**
  4095. * yield - yield the current processor to other threads.
  4096. *
  4097. * this is a shortcut for kernel-space yielding - it marks the
  4098. * thread runnable and calls sys_sched_yield().
  4099. */
  4100. void __sched yield(void)
  4101. {
  4102. set_current_state(TASK_RUNNING);
  4103. sys_sched_yield();
  4104. }
  4105. EXPORT_SYMBOL(yield);
  4106. /*
  4107. * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  4108. * that process accounting knows that this is a task in IO wait state.
  4109. *
  4110. * But don't do that if it is a deliberate, throttling IO wait (this task
  4111. * has set its backing_dev_info: the queue against which it should throttle)
  4112. */
  4113. void __sched io_schedule(void)
  4114. {
  4115. struct rq *rq = &__raw_get_cpu_var(runqueues);
  4116. delayacct_blkio_start();
  4117. atomic_inc(&rq->nr_iowait);
  4118. schedule();
  4119. atomic_dec(&rq->nr_iowait);
  4120. delayacct_blkio_end();
  4121. }
  4122. EXPORT_SYMBOL(io_schedule);
  4123. long __sched io_schedule_timeout(long timeout)
  4124. {
  4125. struct rq *rq = &__raw_get_cpu_var(runqueues);
  4126. long ret;
  4127. delayacct_blkio_start();
  4128. atomic_inc(&rq->nr_iowait);
  4129. ret = schedule_timeout(timeout);
  4130. atomic_dec(&rq->nr_iowait);
  4131. delayacct_blkio_end();
  4132. return ret;
  4133. }
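/*
 * Minimal usage sketch (assumed caller, not part of this file): sleeping
 * uninterruptibly for up to one second while a block request is in flight,
 * so that the time spent is accounted as iowait. request_done() and req
 * are hypothetical.
 *
 *	long remaining = HZ;
 *
 *	set_current_state(TASK_UNINTERRUPTIBLE);
 *	while (!request_done(req) && remaining) {
 *		remaining = io_schedule_timeout(remaining);
 *		set_current_state(TASK_UNINTERRUPTIBLE);
 *	}
 *	__set_current_state(TASK_RUNNING);
 */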
  4134. /**
  4135. * sys_sched_get_priority_max - return maximum RT priority.
  4136. * @policy: scheduling class.
  4137. *
  4138. * this syscall returns the maximum rt_priority that can be used
  4139. * by a given scheduling class.
  4140. */
  4141. asmlinkage long sys_sched_get_priority_max(int policy)
  4142. {
  4143. int ret = -EINVAL;
  4144. switch (policy) {
  4145. case SCHED_FIFO:
  4146. case SCHED_RR:
  4147. ret = MAX_USER_RT_PRIO-1;
  4148. break;
  4149. case SCHED_NORMAL:
  4150. case SCHED_BATCH:
  4151. ret = 0;
  4152. break;
  4153. }
  4154. return ret;
  4155. }
  4156. /**
  4157. * sys_sched_get_priority_min - return minimum RT priority.
  4158. * @policy: scheduling class.
  4159. *
  4160. * this syscall returns the minimum rt_priority that can be used
  4161. * by a given scheduling class.
  4162. */
  4163. asmlinkage long sys_sched_get_priority_min(int policy)
  4164. {
  4165. int ret = -EINVAL;
  4166. switch (policy) {
  4167. case SCHED_FIFO:
  4168. case SCHED_RR:
  4169. ret = 1;
  4170. break;
  4171. case SCHED_NORMAL:
  4172. case SCHED_BATCH:
  4173. ret = 0;
  4174. }
  4175. return ret;
  4176. }
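/*
 * Illustrative userspace sketch (not part of this file): querying the FIFO
 * priority range and switching the calling process to SCHED_FIFO at the
 * highest priority. Needs appropriate privileges; the function name is an
 * arbitrary example.
 *
 *	#include <sched.h>
 *
 *	static int go_fifo_max(void)
 *	{
 *		struct sched_param sp = { 0 };
 *
 *		sp.sched_priority = sched_get_priority_max(SCHED_FIFO);
 *		return sched_setscheduler(0, SCHED_FIFO, &sp);
 *	}
 */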
  4177. /**
  4178. * sys_sched_rr_get_interval - return the default timeslice of a process.
  4179. * @pid: pid of the process.
  4180. * @interval: userspace pointer to the timeslice value.
  4181. *
  4182. * this syscall writes the default timeslice value of a given process
  4183. * into the user-space timespec buffer. A value of '0' means infinity.
  4184. */
  4185. asmlinkage
  4186. long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
  4187. {
  4188. struct task_struct *p;
  4189. int retval = -EINVAL;
  4190. struct timespec t;
  4191. if (pid < 0)
  4192. goto out_nounlock;
  4193. retval = -ESRCH;
  4194. read_lock(&tasklist_lock);
  4195. p = find_process_by_pid(pid);
  4196. if (!p)
  4197. goto out_unlock;
  4198. retval = security_task_getscheduler(p);
  4199. if (retval)
  4200. goto out_unlock;
  4201. jiffies_to_timespec(p->policy == SCHED_FIFO ?
  4202. 0 : task_timeslice(p), &t);
  4203. read_unlock(&tasklist_lock);
  4204. retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
  4205. out_nounlock:
  4206. return retval;
  4207. out_unlock:
  4208. read_unlock(&tasklist_lock);
  4209. return retval;
  4210. }
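/*
 * Illustrative userspace counterpart (a minimal sketch, not part of this
 * file): printing the round-robin timeslice of the calling process.
 *
 *	#include <sched.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	static void print_rr_interval(void)
 *	{
 *		struct timespec ts;
 *
 *		if (sched_rr_get_interval(0, &ts) == 0)
 *			printf("timeslice: %ld.%09ld s\n",
 *				(long)ts.tv_sec, (long)ts.tv_nsec);
 *	}
 */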
  4211. static inline struct task_struct *eldest_child(struct task_struct *p)
  4212. {
  4213. if (list_empty(&p->children))
  4214. return NULL;
  4215. return list_entry(p->children.next,struct task_struct,sibling);
  4216. }
  4217. static inline struct task_struct *older_sibling(struct task_struct *p)
  4218. {
  4219. if (p->sibling.prev==&p->parent->children)
  4220. return NULL;
  4221. return list_entry(p->sibling.prev,struct task_struct,sibling);
  4222. }
  4223. static inline struct task_struct *younger_sibling(struct task_struct *p)
  4224. {
  4225. if (p->sibling.next==&p->parent->children)
  4226. return NULL;
  4227. return list_entry(p->sibling.next,struct task_struct,sibling);
  4228. }
  4229. static const char stat_nam[] = "RSDTtZX";
  4230. static void show_task(struct task_struct *p)
  4231. {
  4232. struct task_struct *relative;
  4233. unsigned long free = 0;
  4234. unsigned state;
  4235. state = p->state ? __ffs(p->state) + 1 : 0;
  4236. printk("%-13.13s %c", p->comm,
  4237. state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
  4238. #if (BITS_PER_LONG == 32)
  4239. if (state == TASK_RUNNING)
  4240. printk(" running ");
  4241. else
  4242. printk(" %08lX ", thread_saved_pc(p));
  4243. #else
  4244. if (state == TASK_RUNNING)
  4245. printk(" running task ");
  4246. else
  4247. printk(" %016lx ", thread_saved_pc(p));
  4248. #endif
  4249. #ifdef CONFIG_DEBUG_STACK_USAGE
  4250. {
  4251. unsigned long *n = end_of_stack(p);
  4252. while (!*n)
  4253. n++;
  4254. free = (unsigned long)n - (unsigned long)end_of_stack(p);
  4255. }
  4256. #endif
  4257. printk("%5lu %5d %6d ", free, p->pid, p->parent->pid);
  4258. if ((relative = eldest_child(p)))
  4259. printk("%5d ", relative->pid);
  4260. else
  4261. printk(" ");
  4262. if ((relative = younger_sibling(p)))
  4263. printk("%7d", relative->pid);
  4264. else
  4265. printk(" ");
  4266. if ((relative = older_sibling(p)))
  4267. printk(" %5d", relative->pid);
  4268. else
  4269. printk(" ");
  4270. if (!p->mm)
  4271. printk(" (L-TLB)\n");
  4272. else
  4273. printk(" (NOTLB)\n");
  4274. if (state != TASK_RUNNING)
  4275. show_stack(p, NULL);
  4276. }
  4277. void show_state_filter(unsigned long state_filter)
  4278. {
  4279. struct task_struct *g, *p;
  4280. #if (BITS_PER_LONG == 32)
  4281. printk("\n"
  4282. " free sibling\n");
  4283. printk(" task PC stack pid father child younger older\n");
  4284. #else
  4285. printk("\n"
  4286. " free sibling\n");
  4287. printk(" task PC stack pid father child younger older\n");
  4288. #endif
  4289. read_lock(&tasklist_lock);
  4290. do_each_thread(g, p) {
  4291. /*
4292. * reset the NMI-timeout; listing all tasks on a slow
4293. * console might take a lot of time:
  4294. */
  4295. touch_nmi_watchdog();
  4296. if (p->state & state_filter)
  4297. show_task(p);
  4298. } while_each_thread(g, p);
  4299. read_unlock(&tasklist_lock);
  4300. /*
  4301. * Only show locks if all tasks are dumped:
  4302. */
  4303. if (state_filter == -1)
  4304. debug_show_all_locks();
  4305. }
  4306. /**
  4307. * init_idle - set up an idle thread for a given CPU
  4308. * @idle: task in question
  4309. * @cpu: cpu the idle task belongs to
  4310. *
  4311. * NOTE: this function does not set the idle thread's NEED_RESCHED
  4312. * flag, to make booting more robust.
  4313. */
  4314. void __cpuinit init_idle(struct task_struct *idle, int cpu)
  4315. {
  4316. struct rq *rq = cpu_rq(cpu);
  4317. unsigned long flags;
  4318. idle->timestamp = sched_clock();
  4319. idle->sleep_avg = 0;
  4320. idle->array = NULL;
  4321. idle->prio = idle->normal_prio = MAX_PRIO;
  4322. idle->state = TASK_RUNNING;
  4323. idle->cpus_allowed = cpumask_of_cpu(cpu);
  4324. set_task_cpu(idle, cpu);
  4325. spin_lock_irqsave(&rq->lock, flags);
  4326. rq->curr = rq->idle = idle;
  4327. #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
  4328. idle->oncpu = 1;
  4329. #endif
  4330. spin_unlock_irqrestore(&rq->lock, flags);
  4331. /* Set the preempt count _outside_ the spinlocks! */
  4332. #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
  4333. task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
  4334. #else
  4335. task_thread_info(idle)->preempt_count = 0;
  4336. #endif
  4337. }
  4338. /*
  4339. * In a system that switches off the HZ timer nohz_cpu_mask
  4340. * indicates which cpus entered this state. This is used
  4341. * in the rcu update to wait only for active cpus. For system
  4342. * which do not switch off the HZ timer nohz_cpu_mask should
  4343. * always be CPU_MASK_NONE.
  4344. */
  4345. cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
  4346. #ifdef CONFIG_SMP
  4347. /*
  4348. * This is how migration works:
  4349. *
  4350. * 1) we queue a struct migration_req structure in the source CPU's
  4351. * runqueue and wake up that CPU's migration thread.
  4352. * 2) we down() the locked semaphore => thread blocks.
  4353. * 3) migration thread wakes up (implicitly it forces the migrated
  4354. * thread off the CPU)
  4355. * 4) it gets the migration request and checks whether the migrated
  4356. * task is still in the wrong runqueue.
  4357. * 5) if it's in the wrong runqueue then the migration thread removes
  4358. * it and puts it into the right queue.
  4359. * 6) migration thread up()s the semaphore.
  4360. * 7) we wake up and the migration is done.
  4361. */
  4362. /*
  4363. * Change a given task's CPU affinity. Migrate the thread to a
  4364. * proper CPU and schedule it away if the CPU it's executing on
  4365. * is removed from the allowed bitmask.
  4366. *
  4367. * NOTE: the caller must have a valid reference to the task, the
  4368. * task must not exit() & deallocate itself prematurely. The
  4369. * call is not atomic; no spinlocks may be held.
  4370. */
  4371. int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
  4372. {
  4373. struct migration_req req;
  4374. unsigned long flags;
  4375. struct rq *rq;
  4376. int ret = 0;
  4377. rq = task_rq_lock(p, &flags);
  4378. if (!cpus_intersects(new_mask, cpu_online_map)) {
  4379. ret = -EINVAL;
  4380. goto out;
  4381. }
  4382. p->cpus_allowed = new_mask;
  4383. /* Can the task run on the task's current CPU? If so, we're done */
  4384. if (cpu_isset(task_cpu(p), new_mask))
  4385. goto out;
  4386. if (migrate_task(p, any_online_cpu(new_mask), &req)) {
  4387. /* Need help from migration thread: drop lock and wait. */
  4388. task_rq_unlock(rq, &flags);
  4389. wake_up_process(rq->migration_thread);
  4390. wait_for_completion(&req.done);
  4391. tlb_migrate_finish(p->mm);
  4392. return 0;
  4393. }
  4394. out:
  4395. task_rq_unlock(rq, &flags);
  4396. return ret;
  4397. }
  4398. EXPORT_SYMBOL_GPL(set_cpus_allowed);
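/*
 * Minimal in-kernel usage sketch (not part of this file): temporarily
 * pinning the current task to one CPU and restoring its mask afterwards -
 * the same pattern measure_one() below uses during migration-cost
 * calibration. target_cpu and do_per_cpu_work() are hypothetical.
 *
 *	cpumask_t saved_mask = current->cpus_allowed;
 *
 *	set_cpus_allowed(current, cpumask_of_cpu(target_cpu));
 *	do_per_cpu_work();
 *	set_cpus_allowed(current, saved_mask);
 */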
  4399. /*
  4400. * Move (not current) task off this cpu, onto dest cpu. We're doing
  4401. * this because either it can't run here any more (set_cpus_allowed()
  4402. * away from this CPU, or CPU going down), or because we're
  4403. * attempting to rebalance this task on exec (sched_exec).
  4404. *
  4405. * So we race with normal scheduler movements, but that's OK, as long
  4406. * as the task is no longer on this CPU.
  4407. *
  4408. * Returns non-zero if task was successfully migrated.
  4409. */
  4410. static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
  4411. {
  4412. struct rq *rq_dest, *rq_src;
  4413. int ret = 0;
  4414. if (unlikely(cpu_is_offline(dest_cpu)))
  4415. return ret;
  4416. rq_src = cpu_rq(src_cpu);
  4417. rq_dest = cpu_rq(dest_cpu);
  4418. double_rq_lock(rq_src, rq_dest);
  4419. /* Already moved. */
  4420. if (task_cpu(p) != src_cpu)
  4421. goto out;
  4422. /* Affinity changed (again). */
  4423. if (!cpu_isset(dest_cpu, p->cpus_allowed))
  4424. goto out;
  4425. set_task_cpu(p, dest_cpu);
  4426. if (p->array) {
  4427. /*
  4428. * Sync timestamp with rq_dest's before activating.
  4429. * The same thing could be achieved by doing this step
  4430. * afterwards, and pretending it was a local activate.
  4431. * This way is cleaner and logically correct.
  4432. */
  4433. p->timestamp = p->timestamp - rq_src->most_recent_timestamp
  4434. + rq_dest->most_recent_timestamp;
  4435. deactivate_task(p, rq_src);
  4436. __activate_task(p, rq_dest);
  4437. if (TASK_PREEMPTS_CURR(p, rq_dest))
  4438. resched_task(rq_dest->curr);
  4439. }
  4440. ret = 1;
  4441. out:
  4442. double_rq_unlock(rq_src, rq_dest);
  4443. return ret;
  4444. }
  4445. /*
  4446. * migration_thread - this is a highprio system thread that performs
  4447. * thread migration by bumping thread off CPU then 'pushing' onto
  4448. * another runqueue.
  4449. */
  4450. static int migration_thread(void *data)
  4451. {
  4452. int cpu = (long)data;
  4453. struct rq *rq;
  4454. rq = cpu_rq(cpu);
  4455. BUG_ON(rq->migration_thread != current);
  4456. set_current_state(TASK_INTERRUPTIBLE);
  4457. while (!kthread_should_stop()) {
  4458. struct migration_req *req;
  4459. struct list_head *head;
  4460. try_to_freeze();
  4461. spin_lock_irq(&rq->lock);
  4462. if (cpu_is_offline(cpu)) {
  4463. spin_unlock_irq(&rq->lock);
  4464. goto wait_to_die;
  4465. }
  4466. if (rq->active_balance) {
  4467. active_load_balance(rq, cpu);
  4468. rq->active_balance = 0;
  4469. }
  4470. head = &rq->migration_queue;
  4471. if (list_empty(head)) {
  4472. spin_unlock_irq(&rq->lock);
  4473. schedule();
  4474. set_current_state(TASK_INTERRUPTIBLE);
  4475. continue;
  4476. }
  4477. req = list_entry(head->next, struct migration_req, list);
  4478. list_del_init(head->next);
  4479. spin_unlock(&rq->lock);
  4480. __migrate_task(req->task, cpu, req->dest_cpu);
  4481. local_irq_enable();
  4482. complete(&req->done);
  4483. }
  4484. __set_current_state(TASK_RUNNING);
  4485. return 0;
  4486. wait_to_die:
  4487. /* Wait for kthread_stop */
  4488. set_current_state(TASK_INTERRUPTIBLE);
  4489. while (!kthread_should_stop()) {
  4490. schedule();
  4491. set_current_state(TASK_INTERRUPTIBLE);
  4492. }
  4493. __set_current_state(TASK_RUNNING);
  4494. return 0;
  4495. }
  4496. #ifdef CONFIG_HOTPLUG_CPU
  4497. /*
4498. * Figure out where a task on a dead CPU should go, use force if necessary.
  4499. * NOTE: interrupts should be disabled by the caller
  4500. */
  4501. static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
  4502. {
  4503. unsigned long flags;
  4504. cpumask_t mask;
  4505. struct rq *rq;
  4506. int dest_cpu;
  4507. restart:
  4508. /* On same node? */
  4509. mask = node_to_cpumask(cpu_to_node(dead_cpu));
  4510. cpus_and(mask, mask, p->cpus_allowed);
  4511. dest_cpu = any_online_cpu(mask);
  4512. /* On any allowed CPU? */
  4513. if (dest_cpu == NR_CPUS)
  4514. dest_cpu = any_online_cpu(p->cpus_allowed);
  4515. /* No more Mr. Nice Guy. */
  4516. if (dest_cpu == NR_CPUS) {
  4517. rq = task_rq_lock(p, &flags);
  4518. cpus_setall(p->cpus_allowed);
  4519. dest_cpu = any_online_cpu(p->cpus_allowed);
  4520. task_rq_unlock(rq, &flags);
  4521. /*
  4522. * Don't tell them about moving exiting tasks or
  4523. * kernel threads (both mm NULL), since they never
  4524. * leave kernel.
  4525. */
  4526. if (p->mm && printk_ratelimit())
  4527. printk(KERN_INFO "process %d (%s) no "
  4528. "longer affine to cpu%d\n",
  4529. p->pid, p->comm, dead_cpu);
  4530. }
  4531. if (!__migrate_task(p, dead_cpu, dest_cpu))
  4532. goto restart;
  4533. }
  4534. /*
  4535. * While a dead CPU has no uninterruptible tasks queued at this point,
  4536. * it might still have a nonzero ->nr_uninterruptible counter, because
4537. * for performance reasons the counter is not strictly tracking tasks to
  4538. * their home CPUs. So we just add the counter to another CPU's counter,
  4539. * to keep the global sum constant after CPU-down:
  4540. */
  4541. static void migrate_nr_uninterruptible(struct rq *rq_src)
  4542. {
  4543. struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
  4544. unsigned long flags;
  4545. local_irq_save(flags);
  4546. double_rq_lock(rq_src, rq_dest);
  4547. rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
  4548. rq_src->nr_uninterruptible = 0;
  4549. double_rq_unlock(rq_src, rq_dest);
  4550. local_irq_restore(flags);
  4551. }
  4552. /* Run through task list and migrate tasks from the dead cpu. */
  4553. static void migrate_live_tasks(int src_cpu)
  4554. {
  4555. struct task_struct *p, *t;
  4556. write_lock_irq(&tasklist_lock);
  4557. do_each_thread(t, p) {
  4558. if (p == current)
  4559. continue;
  4560. if (task_cpu(p) == src_cpu)
  4561. move_task_off_dead_cpu(src_cpu, p);
  4562. } while_each_thread(t, p);
  4563. write_unlock_irq(&tasklist_lock);
  4564. }
  4565. /* Schedules idle task to be the next runnable task on current CPU.
  4566. * It does so by boosting its priority to highest possible and adding it to
  4567. * the _front_ of the runqueue. Used by CPU offline code.
  4568. */
  4569. void sched_idle_next(void)
  4570. {
  4571. int this_cpu = smp_processor_id();
  4572. struct rq *rq = cpu_rq(this_cpu);
  4573. struct task_struct *p = rq->idle;
  4574. unsigned long flags;
  4575. /* cpu has to be offline */
  4576. BUG_ON(cpu_online(this_cpu));
  4577. /*
  4578. * Strictly not necessary since rest of the CPUs are stopped by now
  4579. * and interrupts disabled on the current cpu.
  4580. */
  4581. spin_lock_irqsave(&rq->lock, flags);
  4582. __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
  4583. /* Add idle task to the _front_ of its priority queue: */
  4584. __activate_idle_task(p, rq);
  4585. spin_unlock_irqrestore(&rq->lock, flags);
  4586. }
  4587. /*
  4588. * Ensures that the idle task is using init_mm right before its cpu goes
  4589. * offline.
  4590. */
  4591. void idle_task_exit(void)
  4592. {
  4593. struct mm_struct *mm = current->active_mm;
  4594. BUG_ON(cpu_online(smp_processor_id()));
  4595. if (mm != &init_mm)
  4596. switch_mm(mm, &init_mm, current);
  4597. mmdrop(mm);
  4598. }
  4599. /* called under rq->lock with disabled interrupts */
  4600. static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
  4601. {
  4602. struct rq *rq = cpu_rq(dead_cpu);
  4603. /* Must be exiting, otherwise would be on tasklist. */
  4604. BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
  4605. /* Cannot have done final schedule yet: would have vanished. */
  4606. BUG_ON(p->state == TASK_DEAD);
  4607. get_task_struct(p);
  4608. /*
  4609. * Drop lock around migration; if someone else moves it,
  4610. * that's OK. No task can be added to this CPU, so iteration is
  4611. * fine.
  4612. * NOTE: interrupts should be left disabled --dev@
  4613. */
  4614. spin_unlock(&rq->lock);
  4615. move_task_off_dead_cpu(dead_cpu, p);
  4616. spin_lock(&rq->lock);
  4617. put_task_struct(p);
  4618. }
  4619. /* release_task() removes task from tasklist, so we won't find dead tasks. */
  4620. static void migrate_dead_tasks(unsigned int dead_cpu)
  4621. {
  4622. struct rq *rq = cpu_rq(dead_cpu);
  4623. unsigned int arr, i;
  4624. for (arr = 0; arr < 2; arr++) {
  4625. for (i = 0; i < MAX_PRIO; i++) {
  4626. struct list_head *list = &rq->arrays[arr].queue[i];
  4627. while (!list_empty(list))
  4628. migrate_dead(dead_cpu, list_entry(list->next,
  4629. struct task_struct, run_list));
  4630. }
  4631. }
  4632. }
  4633. #endif /* CONFIG_HOTPLUG_CPU */
  4634. /*
  4635. * migration_call - callback that gets triggered when a CPU is added.
  4636. * Here we can start up the necessary migration thread for the new CPU.
  4637. */
  4638. static int __cpuinit
  4639. migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  4640. {
  4641. struct task_struct *p;
  4642. int cpu = (long)hcpu;
  4643. unsigned long flags;
  4644. struct rq *rq;
  4645. switch (action) {
  4646. case CPU_UP_PREPARE:
  4647. p = kthread_create(migration_thread, hcpu, "migration/%d",cpu);
  4648. if (IS_ERR(p))
  4649. return NOTIFY_BAD;
  4650. p->flags |= PF_NOFREEZE;
  4651. kthread_bind(p, cpu);
  4652. /* Must be high prio: stop_machine expects to yield to it. */
  4653. rq = task_rq_lock(p, &flags);
  4654. __setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
  4655. task_rq_unlock(rq, &flags);
  4656. cpu_rq(cpu)->migration_thread = p;
  4657. break;
  4658. case CPU_ONLINE:
4659. /* Strictly unnecessary, as the first user will wake it. */
  4660. wake_up_process(cpu_rq(cpu)->migration_thread);
  4661. break;
  4662. #ifdef CONFIG_HOTPLUG_CPU
  4663. case CPU_UP_CANCELED:
  4664. if (!cpu_rq(cpu)->migration_thread)
  4665. break;
  4666. /* Unbind it from offline cpu so it can run. Fall thru. */
  4667. kthread_bind(cpu_rq(cpu)->migration_thread,
  4668. any_online_cpu(cpu_online_map));
  4669. kthread_stop(cpu_rq(cpu)->migration_thread);
  4670. cpu_rq(cpu)->migration_thread = NULL;
  4671. break;
  4672. case CPU_DEAD:
  4673. migrate_live_tasks(cpu);
  4674. rq = cpu_rq(cpu);
  4675. kthread_stop(rq->migration_thread);
  4676. rq->migration_thread = NULL;
  4677. /* Idle task back to normal (off runqueue, low prio) */
  4678. rq = task_rq_lock(rq->idle, &flags);
  4679. deactivate_task(rq->idle, rq);
  4680. rq->idle->static_prio = MAX_PRIO;
  4681. __setscheduler(rq->idle, SCHED_NORMAL, 0);
  4682. migrate_dead_tasks(cpu);
  4683. task_rq_unlock(rq, &flags);
  4684. migrate_nr_uninterruptible(rq);
  4685. BUG_ON(rq->nr_running != 0);
  4686. /* No need to migrate the tasks: it was best-effort if
  4687. * they didn't do lock_cpu_hotplug(). Just wake up
  4688. * the requestors. */
  4689. spin_lock_irq(&rq->lock);
  4690. while (!list_empty(&rq->migration_queue)) {
  4691. struct migration_req *req;
  4692. req = list_entry(rq->migration_queue.next,
  4693. struct migration_req, list);
  4694. list_del_init(&req->list);
  4695. complete(&req->done);
  4696. }
  4697. spin_unlock_irq(&rq->lock);
  4698. break;
  4699. #endif
  4700. }
  4701. return NOTIFY_OK;
  4702. }
  4703. /* Register at highest priority so that task migration (migrate_all_tasks)
  4704. * happens before everything else.
  4705. */
  4706. static struct notifier_block __cpuinitdata migration_notifier = {
  4707. .notifier_call = migration_call,
  4708. .priority = 10
  4709. };
  4710. int __init migration_init(void)
  4711. {
  4712. void *cpu = (void *)(long)smp_processor_id();
  4713. int err;
  4714. /* Start one for the boot CPU: */
  4715. err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
  4716. BUG_ON(err == NOTIFY_BAD);
  4717. migration_call(&migration_notifier, CPU_ONLINE, cpu);
  4718. register_cpu_notifier(&migration_notifier);
  4719. return 0;
  4720. }
  4721. #endif
  4722. #ifdef CONFIG_SMP
  4723. #undef SCHED_DOMAIN_DEBUG
  4724. #ifdef SCHED_DOMAIN_DEBUG
  4725. static void sched_domain_debug(struct sched_domain *sd, int cpu)
  4726. {
  4727. int level = 0;
  4728. if (!sd) {
  4729. printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
  4730. return;
  4731. }
  4732. printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
  4733. do {
  4734. int i;
  4735. char str[NR_CPUS];
  4736. struct sched_group *group = sd->groups;
  4737. cpumask_t groupmask;
  4738. cpumask_scnprintf(str, NR_CPUS, sd->span);
  4739. cpus_clear(groupmask);
  4740. printk(KERN_DEBUG);
  4741. for (i = 0; i < level + 1; i++)
  4742. printk(" ");
  4743. printk("domain %d: ", level);
  4744. if (!(sd->flags & SD_LOAD_BALANCE)) {
  4745. printk("does not load-balance\n");
  4746. if (sd->parent)
  4747. printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
  4748. break;
  4749. }
  4750. printk("span %s\n", str);
  4751. if (!cpu_isset(cpu, sd->span))
  4752. printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
  4753. if (!cpu_isset(cpu, group->cpumask))
  4754. printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
  4755. printk(KERN_DEBUG);
  4756. for (i = 0; i < level + 2; i++)
  4757. printk(" ");
  4758. printk("groups:");
  4759. do {
  4760. if (!group) {
  4761. printk("\n");
  4762. printk(KERN_ERR "ERROR: group is NULL\n");
  4763. break;
  4764. }
  4765. if (!group->cpu_power) {
  4766. printk("\n");
  4767. printk(KERN_ERR "ERROR: domain->cpu_power not set\n");
  4768. }
  4769. if (!cpus_weight(group->cpumask)) {
  4770. printk("\n");
  4771. printk(KERN_ERR "ERROR: empty group\n");
  4772. }
  4773. if (cpus_intersects(groupmask, group->cpumask)) {
  4774. printk("\n");
  4775. printk(KERN_ERR "ERROR: repeated CPUs\n");
  4776. }
  4777. cpus_or(groupmask, groupmask, group->cpumask);
  4778. cpumask_scnprintf(str, NR_CPUS, group->cpumask);
  4779. printk(" %s", str);
  4780. group = group->next;
  4781. } while (group != sd->groups);
  4782. printk("\n");
  4783. if (!cpus_equal(sd->span, groupmask))
  4784. printk(KERN_ERR "ERROR: groups don't span domain->span\n");
  4785. level++;
  4786. sd = sd->parent;
  4787. if (sd) {
  4788. if (!cpus_subset(groupmask, sd->span))
  4789. printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
  4790. }
  4791. } while (sd);
  4792. }
  4793. #else
  4794. # define sched_domain_debug(sd, cpu) do { } while (0)
  4795. #endif
  4796. static int sd_degenerate(struct sched_domain *sd)
  4797. {
  4798. if (cpus_weight(sd->span) == 1)
  4799. return 1;
  4800. /* Following flags need at least 2 groups */
  4801. if (sd->flags & (SD_LOAD_BALANCE |
  4802. SD_BALANCE_NEWIDLE |
  4803. SD_BALANCE_FORK |
  4804. SD_BALANCE_EXEC |
  4805. SD_SHARE_CPUPOWER |
  4806. SD_SHARE_PKG_RESOURCES)) {
  4807. if (sd->groups != sd->groups->next)
  4808. return 0;
  4809. }
  4810. /* Following flags don't use groups */
  4811. if (sd->flags & (SD_WAKE_IDLE |
  4812. SD_WAKE_AFFINE |
  4813. SD_WAKE_BALANCE))
  4814. return 0;
  4815. return 1;
  4816. }
  4817. static int
  4818. sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
  4819. {
  4820. unsigned long cflags = sd->flags, pflags = parent->flags;
  4821. if (sd_degenerate(parent))
  4822. return 1;
  4823. if (!cpus_equal(sd->span, parent->span))
  4824. return 0;
  4825. /* Does parent contain flags not in child? */
  4826. /* WAKE_BALANCE is a subset of WAKE_AFFINE */
  4827. if (cflags & SD_WAKE_AFFINE)
  4828. pflags &= ~SD_WAKE_BALANCE;
  4829. /* Flags needing groups don't count if only 1 group in parent */
  4830. if (parent->groups == parent->groups->next) {
  4831. pflags &= ~(SD_LOAD_BALANCE |
  4832. SD_BALANCE_NEWIDLE |
  4833. SD_BALANCE_FORK |
  4834. SD_BALANCE_EXEC |
  4835. SD_SHARE_CPUPOWER |
  4836. SD_SHARE_PKG_RESOURCES);
  4837. }
  4838. if (~cflags & pflags)
  4839. return 0;
  4840. return 1;
  4841. }
  4842. /*
  4843. * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  4844. * hold the hotplug lock.
  4845. */
  4846. static void cpu_attach_domain(struct sched_domain *sd, int cpu)
  4847. {
  4848. struct rq *rq = cpu_rq(cpu);
  4849. struct sched_domain *tmp;
  4850. /* Remove the sched domains which do not contribute to scheduling. */
  4851. for (tmp = sd; tmp; tmp = tmp->parent) {
  4852. struct sched_domain *parent = tmp->parent;
  4853. if (!parent)
  4854. break;
  4855. if (sd_parent_degenerate(tmp, parent)) {
  4856. tmp->parent = parent->parent;
  4857. if (parent->parent)
  4858. parent->parent->child = tmp;
  4859. }
  4860. }
  4861. if (sd && sd_degenerate(sd)) {
  4862. sd = sd->parent;
  4863. if (sd)
  4864. sd->child = NULL;
  4865. }
  4866. sched_domain_debug(sd, cpu);
  4867. rcu_assign_pointer(rq->sd, sd);
  4868. }
  4869. /* cpus with isolated domains */
  4870. static cpumask_t __cpuinitdata cpu_isolated_map = CPU_MASK_NONE;
  4871. /* Setup the mask of cpus configured for isolated domains */
  4872. static int __init isolated_cpu_setup(char *str)
  4873. {
  4874. int ints[NR_CPUS], i;
  4875. str = get_options(str, ARRAY_SIZE(ints), ints);
  4876. cpus_clear(cpu_isolated_map);
  4877. for (i = 1; i <= ints[0]; i++)
  4878. if (ints[i] < NR_CPUS)
  4879. cpu_set(ints[i], cpu_isolated_map);
  4880. return 1;
  4881. }
  4882. __setup ("isolcpus=", isolated_cpu_setup);
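/*
 * Example boot-parameter usage (illustrative): "isolcpus=2,3" keeps CPUs 2
 * and 3 out of the general load-balancing domains, so only tasks explicitly
 * bound there (e.g. via sched_setaffinity()) will run on them.
 */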
  4883. /*
  4884. * init_sched_build_groups takes the cpumask we wish to span, and a pointer
4885. * to a function which identifies what group (along with the sched group) a CPU
4886. * belongs to. The return value of group_fn must be >= 0 and < NR_CPUS
  4887. * (due to the fact that we keep track of groups covered with a cpumask_t).
  4888. *
  4889. * init_sched_build_groups will build a circular linked list of the groups
  4890. * covered by the given span, and will set each group's ->cpumask correctly,
  4891. * and ->cpu_power to 0.
  4892. */
  4893. static void
  4894. init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
  4895. int (*group_fn)(int cpu, const cpumask_t *cpu_map,
  4896. struct sched_group **sg))
  4897. {
  4898. struct sched_group *first = NULL, *last = NULL;
  4899. cpumask_t covered = CPU_MASK_NONE;
  4900. int i;
  4901. for_each_cpu_mask(i, span) {
  4902. struct sched_group *sg;
  4903. int group = group_fn(i, cpu_map, &sg);
  4904. int j;
  4905. if (cpu_isset(i, covered))
  4906. continue;
  4907. sg->cpumask = CPU_MASK_NONE;
  4908. sg->cpu_power = 0;
  4909. for_each_cpu_mask(j, span) {
  4910. if (group_fn(j, cpu_map, NULL) != group)
  4911. continue;
  4912. cpu_set(j, covered);
  4913. cpu_set(j, sg->cpumask);
  4914. }
  4915. if (!first)
  4916. first = sg;
  4917. if (last)
  4918. last->next = sg;
  4919. last = sg;
  4920. }
  4921. last->next = first;
  4922. }
  4923. #define SD_NODES_PER_DOMAIN 16
  4924. /*
  4925. * Self-tuning task migration cost measurement between source and target CPUs.
  4926. *
  4927. * This is done by measuring the cost of manipulating buffers of varying
  4928. * sizes. For a given buffer-size here are the steps that are taken:
  4929. *
  4930. * 1) the source CPU reads+dirties a shared buffer
  4931. * 2) the target CPU reads+dirties the same shared buffer
  4932. *
  4933. * We measure how long they take, in the following 4 scenarios:
  4934. *
  4935. * - source: CPU1, target: CPU2 | cost1
  4936. * - source: CPU2, target: CPU1 | cost2
  4937. * - source: CPU1, target: CPU1 | cost3
  4938. * - source: CPU2, target: CPU2 | cost4
  4939. *
4940. * We then calculate the cost1+cost2-cost3-cost4 difference - this is
4941. * the cost of migration.
  4942. *
4943. * We then start off from a small buffer-size and iterate up to larger
4944. * buffer sizes, in roughly 10% steps - measuring each buffer-size separately, and
  4945. * doing a maximum search for the cost. (The maximum cost for a migration
  4946. * normally occurs when the working set size is around the effective cache
  4947. * size.)
  4948. */
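/*
 * Worked example (illustrative numbers only): if the cross-CPU runs cost
 * cost1 = 900 usecs and cost2 = 950 usecs, while the same-CPU runs cost
 * cost3 = 300 usecs and cost4 = 320 usecs, then the estimated migration
 * cost for that buffer size is (900 + 950) - (300 + 320) = 1230 usecs,
 * i.e. the extra time spent re-filling the target CPU's cache.
 */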
  4949. #define SEARCH_SCOPE 2
  4950. #define MIN_CACHE_SIZE (64*1024U)
  4951. #define DEFAULT_CACHE_SIZE (5*1024*1024U)
  4952. #define ITERATIONS 1
  4953. #define SIZE_THRESH 130
  4954. #define COST_THRESH 130
  4955. /*
  4956. * The migration cost is a function of 'domain distance'. Domain
  4957. * distance is the number of steps a CPU has to iterate down its
  4958. * domain tree to share a domain with the other CPU. The farther
  4959. * two CPUs are from each other, the larger the distance gets.
  4960. *
  4961. * Note that we use the distance only to cache measurement results,
  4962. * the distance value is not used numerically otherwise. When two
  4963. * CPUs have the same distance it is assumed that the migration
  4964. * cost is the same. (this is a simplification but quite practical)
  4965. */
  4966. #define MAX_DOMAIN_DISTANCE 32
  4967. static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
  4968. { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] =
  4969. /*
  4970. * Architectures may override the migration cost and thus avoid
  4971. * boot-time calibration. Unit is nanoseconds. Mostly useful for
  4972. * virtualized hardware:
  4973. */
  4974. #ifdef CONFIG_DEFAULT_MIGRATION_COST
  4975. CONFIG_DEFAULT_MIGRATION_COST
  4976. #else
  4977. -1LL
  4978. #endif
  4979. };
  4980. /*
  4981. * Allow override of migration cost - in units of microseconds.
  4982. * E.g. migration_cost=1000,2000,3000 will set up a level-1 cost
  4983. * of 1 msec, level-2 cost of 2 msecs and level3 cost of 3 msecs:
  4984. */
  4985. static int __init migration_cost_setup(char *str)
  4986. {
  4987. int ints[MAX_DOMAIN_DISTANCE+1], i;
  4988. str = get_options(str, ARRAY_SIZE(ints), ints);
  4989. printk("#ints: %d\n", ints[0]);
  4990. for (i = 1; i <= ints[0]; i++) {
  4991. migration_cost[i-1] = (unsigned long long)ints[i]*1000;
  4992. printk("migration_cost[%d]: %Ld\n", i-1, migration_cost[i-1]);
  4993. }
  4994. return 1;
  4995. }
  4996. __setup ("migration_cost=", migration_cost_setup);
  4997. /*
  4998. * Global multiplier (divisor) for migration-cutoff values,
  4999. * in percentiles. E.g. use a value of 150 to get 1.5 times
  5000. * longer cache-hot cutoff times.
  5001. *
5002. * (We scale it from 100 to 128 to make long long handling easier.)
  5003. */
  5004. #define MIGRATION_FACTOR_SCALE 128
  5005. static unsigned int migration_factor = MIGRATION_FACTOR_SCALE;
  5006. static int __init setup_migration_factor(char *str)
  5007. {
  5008. get_option(&str, &migration_factor);
  5009. migration_factor = migration_factor * MIGRATION_FACTOR_SCALE / 100;
  5010. return 1;
  5011. }
  5012. __setup("migration_factor=", setup_migration_factor);
  5013. /*
  5014. * Estimated distance of two CPUs, measured via the number of domains
  5015. * we have to pass for the two CPUs to be in the same span:
  5016. */
  5017. static unsigned long domain_distance(int cpu1, int cpu2)
  5018. {
  5019. unsigned long distance = 0;
  5020. struct sched_domain *sd;
  5021. for_each_domain(cpu1, sd) {
  5022. WARN_ON(!cpu_isset(cpu1, sd->span));
  5023. if (cpu_isset(cpu2, sd->span))
  5024. return distance;
  5025. distance++;
  5026. }
  5027. if (distance >= MAX_DOMAIN_DISTANCE) {
  5028. WARN_ON(1);
  5029. distance = MAX_DOMAIN_DISTANCE-1;
  5030. }
  5031. return distance;
  5032. }
  5033. static unsigned int migration_debug;
  5034. static int __init setup_migration_debug(char *str)
  5035. {
  5036. get_option(&str, &migration_debug);
  5037. return 1;
  5038. }
  5039. __setup("migration_debug=", setup_migration_debug);
  5040. /*
  5041. * Maximum cache-size that the scheduler should try to measure.
  5042. * Architectures with larger caches should tune this up during
  5043. * bootup. Gets used in the domain-setup code (i.e. during SMP
  5044. * bootup).
  5045. */
  5046. unsigned int max_cache_size;
  5047. static int __init setup_max_cache_size(char *str)
  5048. {
  5049. get_option(&str, &max_cache_size);
  5050. return 1;
  5051. }
  5052. __setup("max_cache_size=", setup_max_cache_size);
  5053. /*
  5054. * Dirty a big buffer in a hard-to-predict (for the L2 cache) way. This
  5055. * is the operation that is timed, so we try to generate unpredictable
  5056. * cachemisses that still end up filling the L2 cache:
  5057. */
  5058. static void touch_cache(void *__cache, unsigned long __size)
  5059. {
  5060. unsigned long size = __size/sizeof(long), chunk1 = size/3,
  5061. chunk2 = 2*size/3;
  5062. unsigned long *cache = __cache;
  5063. int i;
  5064. for (i = 0; i < size/6; i += 8) {
  5065. switch (i % 6) {
  5066. case 0: cache[i]++;
  5067. case 1: cache[size-1-i]++;
  5068. case 2: cache[chunk1-i]++;
  5069. case 3: cache[chunk1+i]++;
  5070. case 4: cache[chunk2-i]++;
  5071. case 5: cache[chunk2+i]++;
  5072. }
  5073. }
  5074. }
  5075. /*
  5076. * Measure the cache-cost of one task migration. Returns in units of nsec.
  5077. */
  5078. static unsigned long long
  5079. measure_one(void *cache, unsigned long size, int source, int target)
  5080. {
  5081. cpumask_t mask, saved_mask;
  5082. unsigned long long t0, t1, t2, t3, cost;
  5083. saved_mask = current->cpus_allowed;
  5084. /*
  5085. * Flush source caches to RAM and invalidate them:
  5086. */
  5087. sched_cacheflush();
  5088. /*
  5089. * Migrate to the source CPU:
  5090. */
  5091. mask = cpumask_of_cpu(source);
  5092. set_cpus_allowed(current, mask);
  5093. WARN_ON(smp_processor_id() != source);
  5094. /*
  5095. * Dirty the working set:
  5096. */
  5097. t0 = sched_clock();
  5098. touch_cache(cache, size);
  5099. t1 = sched_clock();
  5100. /*
  5101. * Migrate to the target CPU, dirty the L2 cache and access
  5102. * the shared buffer. (which represents the working set
  5103. * of a migrated task.)
  5104. */
  5105. mask = cpumask_of_cpu(target);
  5106. set_cpus_allowed(current, mask);
  5107. WARN_ON(smp_processor_id() != target);
  5108. t2 = sched_clock();
  5109. touch_cache(cache, size);
  5110. t3 = sched_clock();
  5111. cost = t1-t0 + t3-t2;
  5112. if (migration_debug >= 2)
  5113. printk("[%d->%d]: %8Ld %8Ld %8Ld => %10Ld.\n",
  5114. source, target, t1-t0, t1-t0, t3-t2, cost);
  5115. /*
  5116. * Flush target caches to RAM and invalidate them:
  5117. */
  5118. sched_cacheflush();
  5119. set_cpus_allowed(current, saved_mask);
  5120. return cost;
  5121. }
  5122. /*
  5123. * Measure a series of task migrations and return the average
  5124. * result. Since this code runs early during bootup the system
  5125. * is 'undisturbed' and the average latency makes sense.
  5126. *
  5127. * The algorithm in essence auto-detects the relevant cache-size,
  5128. * so it will properly detect different cachesizes for different
  5129. * cache-hierarchies, depending on how the CPUs are connected.
  5130. *
  5131. * Architectures can prime the upper limit of the search range via
5132. * max_cache_size, otherwise the search range defaults to 10MB...64K.
  5133. */
  5134. static unsigned long long
  5135. measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
  5136. {
  5137. unsigned long long cost1, cost2;
  5138. int i;
  5139. /*
  5140. * Measure the migration cost of 'size' bytes, over an
  5141. * average of 10 runs:
  5142. *
  5143. * (We perturb the cache size by a small (0..4k)
  5144. * value to compensate size/alignment related artifacts.
  5145. * We also subtract the cost of the operation done on
  5146. * the same CPU.)
  5147. */
  5148. cost1 = 0;
  5149. /*
  5150. * dry run, to make sure we start off cache-cold on cpu1,
  5151. * and to get any vmalloc pagefaults in advance:
  5152. */
  5153. measure_one(cache, size, cpu1, cpu2);
  5154. for (i = 0; i < ITERATIONS; i++)
  5155. cost1 += measure_one(cache, size - i*1024, cpu1, cpu2);
  5156. measure_one(cache, size, cpu2, cpu1);
  5157. for (i = 0; i < ITERATIONS; i++)
  5158. cost1 += measure_one(cache, size - i*1024, cpu2, cpu1);
  5159. /*
  5160. * (We measure the non-migrating [cached] cost on both
  5161. * cpu1 and cpu2, to handle CPUs with different speeds)
  5162. */
  5163. cost2 = 0;
  5164. measure_one(cache, size, cpu1, cpu1);
  5165. for (i = 0; i < ITERATIONS; i++)
  5166. cost2 += measure_one(cache, size - i*1024, cpu1, cpu1);
  5167. measure_one(cache, size, cpu2, cpu2);
  5168. for (i = 0; i < ITERATIONS; i++)
  5169. cost2 += measure_one(cache, size - i*1024, cpu2, cpu2);
  5170. /*
  5171. * Get the per-iteration migration cost:
  5172. */
  5173. do_div(cost1, 2*ITERATIONS);
  5174. do_div(cost2, 2*ITERATIONS);
  5175. return cost1 - cost2;
  5176. }
  5177. static unsigned long long measure_migration_cost(int cpu1, int cpu2)
  5178. {
  5179. unsigned long long max_cost = 0, fluct = 0, avg_fluct = 0;
  5180. unsigned int max_size, size, size_found = 0;
  5181. long long cost = 0, prev_cost;
  5182. void *cache;
  5183. /*
5184. * Search between max_cache_size/SEARCH_SCOPE and max_cache_size*SEARCH_SCOPE
5185. * (bounded below by 64K) - the real relevant cachesize has to lie somewhere in between.
  5186. */
  5187. if (max_cache_size) {
  5188. max_size = max(max_cache_size * SEARCH_SCOPE, MIN_CACHE_SIZE);
  5189. size = max(max_cache_size / SEARCH_SCOPE, MIN_CACHE_SIZE);
  5190. } else {
  5191. /*
5192. * Since we have no estimate of the relevant search
5193. * range, fall back to the defaults:
  5194. */
  5195. max_size = DEFAULT_CACHE_SIZE * SEARCH_SCOPE;
  5196. size = MIN_CACHE_SIZE;
  5197. }
  5198. if (!cpu_online(cpu1) || !cpu_online(cpu2)) {
  5199. printk("cpu %d and %d not both online!\n", cpu1, cpu2);
  5200. return 0;
  5201. }
  5202. /*
  5203. * Allocate the working set:
  5204. */
  5205. cache = vmalloc(max_size);
  5206. if (!cache) {
5207. printk("could not vmalloc %d bytes for cache!\n", max_size);
  5208. return 1000000; /* return 1 msec on very small boxen */
  5209. }
  5210. while (size <= max_size) {
  5211. prev_cost = cost;
  5212. cost = measure_cost(cpu1, cpu2, cache, size);
  5213. /*
  5214. * Update the max:
  5215. */
  5216. if (cost > 0) {
  5217. if (max_cost < cost) {
  5218. max_cost = cost;
  5219. size_found = size;
  5220. }
  5221. }
  5222. /*
  5223. * Calculate average fluctuation, we use this to prevent
  5224. * noise from triggering an early break out of the loop:
  5225. */
  5226. fluct = abs(cost - prev_cost);
  5227. avg_fluct = (avg_fluct + fluct)/2;
  5228. if (migration_debug)
  5229. printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): (%8Ld %8Ld)\n",
  5230. cpu1, cpu2, size,
  5231. (long)cost / 1000000,
  5232. ((long)cost / 100000) % 10,
  5233. (long)max_cost / 1000000,
  5234. ((long)max_cost / 100000) % 10,
  5235. domain_distance(cpu1, cpu2),
  5236. cost, avg_fluct);
  5237. /*
  5238. * If we iterated at least 20% past the previous maximum,
  5239. * and the cost has dropped by more than 20% already,
  5240. * (taking fluctuations into account) then we assume to
  5241. * have found the maximum and break out of the loop early:
  5242. */
  5243. if (size_found && (size*100 > size_found*SIZE_THRESH))
  5244. if (cost+avg_fluct <= 0 ||
  5245. max_cost*100 > (cost+avg_fluct)*COST_THRESH) {
  5246. if (migration_debug)
  5247. printk("-> found max.\n");
  5248. break;
  5249. }
  5250. /*
  5251. * Increase the cachesize in 10% steps:
  5252. */
  5253. size = size * 10 / 9;
  5254. }
  5255. if (migration_debug)
  5256. printk("[%d][%d] working set size found: %d, cost: %Ld\n",
  5257. cpu1, cpu2, size_found, max_cost);
  5258. vfree(cache);
  5259. /*
  5260. * A task is considered 'cache cold' if at least 2 times
  5261. * the worst-case cost of migration has passed.
  5262. *
  5263. * (this limit is only listened to if the load-balancing
  5264. * situation is 'nice' - if there is a large imbalance we
  5265. * ignore it for the sake of CPU utilization and
  5266. * processing fairness.)
  5267. */
  5268. return 2 * max_cost * migration_factor / MIGRATION_FACTOR_SCALE;
  5269. }
  5270. static void calibrate_migration_costs(const cpumask_t *cpu_map)
  5271. {
  5272. int cpu1 = -1, cpu2 = -1, cpu, orig_cpu = raw_smp_processor_id();
  5273. unsigned long j0, j1, distance, max_distance = 0;
  5274. struct sched_domain *sd;
  5275. j0 = jiffies;
  5276. /*
  5277. * First pass - calculate the cacheflush times:
  5278. */
  5279. for_each_cpu_mask(cpu1, *cpu_map) {
  5280. for_each_cpu_mask(cpu2, *cpu_map) {
  5281. if (cpu1 == cpu2)
  5282. continue;
  5283. distance = domain_distance(cpu1, cpu2);
  5284. max_distance = max(max_distance, distance);
  5285. /*
  5286. * No result cached yet?
  5287. */
  5288. if (migration_cost[distance] == -1LL)
  5289. migration_cost[distance] =
  5290. measure_migration_cost(cpu1, cpu2);
  5291. }
  5292. }
  5293. /*
  5294. * Second pass - update the sched domain hierarchy with
  5295. * the new cache-hot-time estimations:
  5296. */
  5297. for_each_cpu_mask(cpu, *cpu_map) {
  5298. distance = 0;
  5299. for_each_domain(cpu, sd) {
  5300. sd->cache_hot_time = migration_cost[distance];
  5301. distance++;
  5302. }
  5303. }
  5304. /*
  5305. * Print the matrix:
  5306. */
  5307. if (migration_debug)
  5308. printk("migration: max_cache_size: %d, cpu: %d MHz:\n",
  5309. max_cache_size,
  5310. #ifdef CONFIG_X86
  5311. cpu_khz/1000
  5312. #else
  5313. -1
  5314. #endif
  5315. );
  5316. if (system_state == SYSTEM_BOOTING) {
  5317. if (num_online_cpus() > 1) {
  5318. printk("migration_cost=");
  5319. for (distance = 0; distance <= max_distance; distance++) {
  5320. if (distance)
  5321. printk(",");
  5322. printk("%ld", (long)migration_cost[distance] / 1000);
  5323. }
  5324. printk("\n");
  5325. }
  5326. }
  5327. j1 = jiffies;
  5328. if (migration_debug)
  5329. printk("migration: %ld seconds\n", (j1-j0)/HZ);
  5330. /*
  5331. * Move back to the original CPU. NUMA-Q gets confused
  5332. * if we migrate to another quad during bootup.
  5333. */
  5334. if (raw_smp_processor_id() != orig_cpu) {
  5335. cpumask_t mask = cpumask_of_cpu(orig_cpu),
  5336. saved_mask = current->cpus_allowed;
  5337. set_cpus_allowed(current, mask);
  5338. set_cpus_allowed(current, saved_mask);
  5339. }
  5340. }
  5341. #ifdef CONFIG_NUMA
  5342. /**
  5343. * find_next_best_node - find the next node to include in a sched_domain
  5344. * @node: node whose sched_domain we're building
  5345. * @used_nodes: nodes already in the sched_domain
  5346. *
  5347. * Find the next node to include in a given scheduling domain. Simply
  5348. * finds the closest node not already in the @used_nodes map.
  5349. *
  5350. * Should use nodemask_t.
  5351. */
  5352. static int find_next_best_node(int node, unsigned long *used_nodes)
  5353. {
  5354. int i, n, val, min_val, best_node = 0;
  5355. min_val = INT_MAX;
  5356. for (i = 0; i < MAX_NUMNODES; i++) {
  5357. /* Start at @node */
  5358. n = (node + i) % MAX_NUMNODES;
  5359. if (!nr_cpus_node(n))
  5360. continue;
  5361. /* Skip already used nodes */
  5362. if (test_bit(n, used_nodes))
  5363. continue;
  5364. /* Simple min distance search */
  5365. val = node_distance(node, n);
  5366. if (val < min_val) {
  5367. min_val = val;
  5368. best_node = n;
  5369. }
  5370. }
  5371. set_bit(best_node, used_nodes);
  5372. return best_node;
  5373. }
  5374. /**
  5375. * sched_domain_node_span - get a cpumask for a node's sched_domain
  5376. * @node: node whose cpumask we're constructing
  5377. * @size: number of nodes to include in this span
  5378. *
  5379. * Given a node, construct a good cpumask for its sched_domain to span. It
  5380. * should be one that prevents unnecessary balancing, but also spreads tasks
  5381. * out optimally.
  5382. */
  5383. static cpumask_t sched_domain_node_span(int node)
  5384. {
  5385. DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
  5386. cpumask_t span, nodemask;
  5387. int i;
  5388. cpus_clear(span);
  5389. bitmap_zero(used_nodes, MAX_NUMNODES);
  5390. nodemask = node_to_cpumask(node);
  5391. cpus_or(span, span, nodemask);
  5392. set_bit(node, used_nodes);
  5393. for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
  5394. int next_node = find_next_best_node(node, used_nodes);
  5395. nodemask = node_to_cpumask(next_node);
  5396. cpus_or(span, span, nodemask);
  5397. }
  5398. return span;
  5399. }
  5400. #endif
  5401. int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
  5402. /*
  5403. * SMT sched-domains:
  5404. */
  5405. #ifdef CONFIG_SCHED_SMT
  5406. static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
  5407. static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
  5408. static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
  5409. struct sched_group **sg)
  5410. {
  5411. if (sg)
  5412. *sg = &per_cpu(sched_group_cpus, cpu);
  5413. return cpu;
  5414. }
  5415. #endif
  5416. /*
  5417. * multi-core sched-domains:
  5418. */
  5419. #ifdef CONFIG_SCHED_MC
  5420. static DEFINE_PER_CPU(struct sched_domain, core_domains);
  5421. static DEFINE_PER_CPU(struct sched_group, sched_group_core);
  5422. #endif
  5423. #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
  5424. static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
  5425. struct sched_group **sg)
  5426. {
  5427. int group;
  5428. cpumask_t mask = cpu_sibling_map[cpu];
  5429. cpus_and(mask, mask, *cpu_map);
  5430. group = first_cpu(mask);
  5431. if (sg)
  5432. *sg = &per_cpu(sched_group_core, group);
  5433. return group;
  5434. }
  5435. #elif defined(CONFIG_SCHED_MC)
  5436. static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
  5437. struct sched_group **sg)
  5438. {
  5439. if (sg)
  5440. *sg = &per_cpu(sched_group_core, cpu);
  5441. return cpu;
  5442. }
  5443. #endif
  5444. static DEFINE_PER_CPU(struct sched_domain, phys_domains);
  5445. static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
  5446. static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
  5447. struct sched_group **sg)
  5448. {
  5449. int group;
  5450. #ifdef CONFIG_SCHED_MC
  5451. cpumask_t mask = cpu_coregroup_map(cpu);
  5452. cpus_and(mask, mask, *cpu_map);
  5453. group = first_cpu(mask);
  5454. #elif defined(CONFIG_SCHED_SMT)
  5455. cpumask_t mask = cpu_sibling_map[cpu];
  5456. cpus_and(mask, mask, *cpu_map);
  5457. group = first_cpu(mask);
  5458. #else
  5459. group = cpu;
  5460. #endif
  5461. if (sg)
  5462. *sg = &per_cpu(sched_group_phys, group);
  5463. return group;
  5464. }
  5465. #ifdef CONFIG_NUMA
  5466. /*
  5467. * The init_sched_build_groups can't handle what we want to do with node
  5468. * groups, so roll our own. Now each node has its own list of groups which
  5469. * gets dynamically allocated.
  5470. */
  5471. static DEFINE_PER_CPU(struct sched_domain, node_domains);
  5472. static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
  5473. static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
  5474. static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
  5475. static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
  5476. struct sched_group **sg)
  5477. {
  5478. cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));
  5479. int group;
  5480. cpus_and(nodemask, nodemask, *cpu_map);
  5481. group = first_cpu(nodemask);
  5482. if (sg)
  5483. *sg = &per_cpu(sched_group_allnodes, group);
  5484. return group;
  5485. }
  5486. static void init_numa_sched_groups_power(struct sched_group *group_head)
  5487. {
  5488. struct sched_group *sg = group_head;
  5489. int j;
  5490. if (!sg)
  5491. return;
  5492. next_sg:
  5493. for_each_cpu_mask(j, sg->cpumask) {
  5494. struct sched_domain *sd;
  5495. sd = &per_cpu(phys_domains, j);
  5496. if (j != first_cpu(sd->groups->cpumask)) {
  5497. /*
  5498. * Only add "power" once for each
  5499. * physical package.
  5500. */
  5501. continue;
  5502. }
  5503. sg->cpu_power += sd->groups->cpu_power;
  5504. }
  5505. sg = sg->next;
  5506. if (sg != group_head)
  5507. goto next_sg;
  5508. }
  5509. #endif
  5510. #ifdef CONFIG_NUMA
  5511. /* Free memory allocated for various sched_group structures */
  5512. static void free_sched_groups(const cpumask_t *cpu_map)
  5513. {
  5514. int cpu, i;
  5515. for_each_cpu_mask(cpu, *cpu_map) {
  5516. struct sched_group **sched_group_nodes
  5517. = sched_group_nodes_bycpu[cpu];
  5518. if (!sched_group_nodes)
  5519. continue;
  5520. for (i = 0; i < MAX_NUMNODES; i++) {
  5521. cpumask_t nodemask = node_to_cpumask(i);
  5522. struct sched_group *oldsg, *sg = sched_group_nodes[i];
  5523. cpus_and(nodemask, nodemask, *cpu_map);
  5524. if (cpus_empty(nodemask))
  5525. continue;
  5526. if (sg == NULL)
  5527. continue;
  5528. sg = sg->next;
  5529. next_sg:
  5530. oldsg = sg;
  5531. sg = sg->next;
  5532. kfree(oldsg);
  5533. if (oldsg != sched_group_nodes[i])
  5534. goto next_sg;
  5535. }
  5536. kfree(sched_group_nodes);
  5537. sched_group_nodes_bycpu[cpu] = NULL;
  5538. }
  5539. }
  5540. #else
  5541. static void free_sched_groups(const cpumask_t *cpu_map)
  5542. {
  5543. }
  5544. #endif
  5545. /*
  5546. * Initialize sched groups cpu_power.
  5547. *
  5548. * cpu_power indicates the capacity of sched group, which is used while
  5549. * distributing the load between different sched groups in a sched domain.
5550. * Typically cpu_power for all the groups in a sched domain will be the same unless
  5551. * there are asymmetries in the topology. If there are asymmetries, group
  5552. * having more cpu_power will pickup more load compared to the group having
  5553. * less cpu_power.
  5554. *
  5555. * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
  5556. * the maximum number of tasks a group can handle in the presence of other idle
  5557. * or lightly loaded groups in the same sched domain.
  5558. */
  5559. static void init_sched_groups_power(int cpu, struct sched_domain *sd)
  5560. {
  5561. struct sched_domain *child;
  5562. struct sched_group *group;
  5563. WARN_ON(!sd || !sd->groups);
  5564. if (cpu != first_cpu(sd->groups->cpumask))
  5565. return;
  5566. child = sd->child;
  5567. /*
  5568. * For perf policy, if the groups in child domain share resources
  5569. * (for example cores sharing some portions of the cache hierarchy
  5570. * or SMT), then set this domain groups cpu_power such that each group
  5571. * can handle only one task, when there are other idle groups in the
  5572. * same sched domain.
  5573. */
  5574. if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
  5575. (child->flags &
  5576. (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
  5577. sd->groups->cpu_power = SCHED_LOAD_SCALE;
  5578. return;
  5579. }
  5580. sd->groups->cpu_power = 0;
  5581. /*
  5582. * add cpu_power of each child group to this groups cpu_power
  5583. */
  5584. group = child->groups;
  5585. do {
  5586. sd->groups->cpu_power += group->cpu_power;
  5587. group = group->next;
  5588. } while (group != child->groups);
  5589. }
/*
 * Build sched domains for a given set of cpus and attach the sched domains
 * to the individual cpus.
 */
static int build_sched_domains(const cpumask_t *cpu_map)
{
        int i;
        struct sched_domain *sd;
#ifdef CONFIG_NUMA
        struct sched_group **sched_group_nodes = NULL;
        int sd_allnodes = 0;

        /*
         * Allocate the per-node list of sched groups
         */
        sched_group_nodes = kzalloc(sizeof(struct sched_group *) * MAX_NUMNODES,
                                    GFP_KERNEL);
        if (!sched_group_nodes) {
                printk(KERN_WARNING "Can not alloc sched group node list\n");
                return -ENOMEM;
        }
        sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
#endif

        /*
         * Set up domains for cpus specified by the cpu_map.
         */
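        /*
         * For each cpu the loop below builds the domain hierarchy from the
         * top down: an optional NUMA "allnodes" domain, a NUMA node domain,
         * a physical (package) domain and, when configured, multi-core and
         * SMT sibling domains, each linked to its parent and child.
         */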
        for_each_cpu_mask(i, *cpu_map) {
                struct sched_domain *sd = NULL, *p;
                cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));

                cpus_and(nodemask, nodemask, *cpu_map);

#ifdef CONFIG_NUMA
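                /*
                 * Add the "allnodes" level only when the cpu_map covers more
                 * than SD_NODES_PER_DOMAIN nodes' worth of cpus; otherwise
                 * the node domain is already the top level.
                 */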
                if (cpus_weight(*cpu_map)
                    > SD_NODES_PER_DOMAIN * cpus_weight(nodemask)) {
                        sd = &per_cpu(allnodes_domains, i);
                        *sd = SD_ALLNODES_INIT;
                        sd->span = *cpu_map;
                        cpu_to_allnodes_group(i, cpu_map, &sd->groups);
                        p = sd;
                        sd_allnodes = 1;
                } else
                        p = NULL;

                sd = &per_cpu(node_domains, i);
                *sd = SD_NODE_INIT;
                sd->span = sched_domain_node_span(cpu_to_node(i));
                sd->parent = p;
                if (p)
                        p->child = sd;
                cpus_and(sd->span, sd->span, *cpu_map);
#endif

                p = sd;
                sd = &per_cpu(phys_domains, i);
                *sd = SD_CPU_INIT;
                sd->span = nodemask;
                sd->parent = p;
                if (p)
                        p->child = sd;
                cpu_to_phys_group(i, cpu_map, &sd->groups);

#ifdef CONFIG_SCHED_MC
                p = sd;
                sd = &per_cpu(core_domains, i);
                *sd = SD_MC_INIT;
                sd->span = cpu_coregroup_map(i);
                cpus_and(sd->span, sd->span, *cpu_map);
                sd->parent = p;
                p->child = sd;
                cpu_to_core_group(i, cpu_map, &sd->groups);
#endif

#ifdef CONFIG_SCHED_SMT
                p = sd;
                sd = &per_cpu(cpu_domains, i);
                *sd = SD_SIBLING_INIT;
                sd->span = cpu_sibling_map[i];
                cpus_and(sd->span, sd->span, *cpu_map);
                sd->parent = p;
                p->child = sd;
                cpu_to_cpu_group(i, cpu_map, &sd->groups);
#endif
        }
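        /*
         * Now build the sched_group lists for each level. Each list is
         * created only once per distinct span (the first_cpu() checks skip
         * the other cpus), so all cpus sharing a span end up pointing at
         * the same groups.
         */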
#ifdef CONFIG_SCHED_SMT
        /* Set up CPU (sibling) groups */
        for_each_cpu_mask(i, *cpu_map) {
                cpumask_t this_sibling_map = cpu_sibling_map[i];

                cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
                if (i != first_cpu(this_sibling_map))
                        continue;

                init_sched_build_groups(this_sibling_map, cpu_map, &cpu_to_cpu_group);
        }
#endif

#ifdef CONFIG_SCHED_MC
        /* Set up multi-core groups */
        for_each_cpu_mask(i, *cpu_map) {
                cpumask_t this_core_map = cpu_coregroup_map(i);

                cpus_and(this_core_map, this_core_map, *cpu_map);
                if (i != first_cpu(this_core_map))
                        continue;

                init_sched_build_groups(this_core_map, cpu_map, &cpu_to_core_group);
        }
#endif

        /* Set up physical groups */
        for (i = 0; i < MAX_NUMNODES; i++) {
                cpumask_t nodemask = node_to_cpumask(i);

                cpus_and(nodemask, nodemask, *cpu_map);
                if (cpus_empty(nodemask))
                        continue;

                init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group);
        }
#ifdef CONFIG_NUMA
        /* Set up node groups */
        if (sd_allnodes)
                init_sched_build_groups(*cpu_map, cpu_map, &cpu_to_allnodes_group);

        for (i = 0; i < MAX_NUMNODES; i++) {
                /* Set up node groups */
                struct sched_group *sg, *prev;
                cpumask_t nodemask = node_to_cpumask(i);
                cpumask_t domainspan;
                cpumask_t covered = CPU_MASK_NONE;
                int j;

                cpus_and(nodemask, nodemask, *cpu_map);
                if (cpus_empty(nodemask)) {
                        sched_group_nodes[i] = NULL;
                        continue;
                }

                domainspan = sched_domain_node_span(i);
                cpus_and(domainspan, domainspan, *cpu_map);

                sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
                if (!sg) {
                        printk(KERN_WARNING "Can not alloc domain group for "
                                            "node %d\n", i);
                        goto error;
                }
                sched_group_nodes[i] = sg;
                for_each_cpu_mask(j, nodemask) {
                        struct sched_domain *sd;

                        sd = &per_cpu(node_domains, j);
                        sd->groups = sg;
                }
                sg->cpu_power = 0;
                sg->cpumask = nodemask;
                sg->next = sg;
                cpus_or(covered, covered, nodemask);
                prev = sg;
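                /*
                 * Link in one additional group for every other node that
                 * falls inside this node's domain span and is not yet
                 * covered, walking the nodes in (i + j) order; the result is
                 * a circular list headed by sched_group_nodes[i].
                 */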
                for (j = 0; j < MAX_NUMNODES; j++) {
                        cpumask_t tmp, notcovered;
                        int n = (i + j) % MAX_NUMNODES;

                        cpus_complement(notcovered, covered);
                        cpus_and(tmp, notcovered, *cpu_map);
                        cpus_and(tmp, tmp, domainspan);
                        if (cpus_empty(tmp))
                                break;

                        nodemask = node_to_cpumask(n);
                        cpus_and(tmp, tmp, nodemask);
                        if (cpus_empty(tmp))
                                continue;

                        sg = kmalloc_node(sizeof(struct sched_group),
                                          GFP_KERNEL, i);
                        if (!sg) {
                                printk(KERN_WARNING
                                       "Can not alloc domain group for node %d\n", j);
                                goto error;
                        }
                        sg->cpu_power = 0;
                        sg->cpumask = tmp;
                        sg->next = prev->next;
                        cpus_or(covered, covered, tmp);
                        prev->next = sg;
                        prev = sg;
                }
        }
#endif

        /* Calculate CPU power for physical packages and nodes */
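        /*
         * cpu_power has to be initialized bottom-up, because each level sums
         * the power of its child domain's groups (see
         * init_sched_groups_power()): SMT siblings first, then cores, then
         * physical packages, and finally the NUMA levels.
         */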
#ifdef CONFIG_SCHED_SMT
        for_each_cpu_mask(i, *cpu_map) {
                sd = &per_cpu(cpu_domains, i);
                init_sched_groups_power(i, sd);
        }
#endif
#ifdef CONFIG_SCHED_MC
        for_each_cpu_mask(i, *cpu_map) {
                sd = &per_cpu(core_domains, i);
                init_sched_groups_power(i, sd);
        }
#endif

        for_each_cpu_mask(i, *cpu_map) {
                sd = &per_cpu(phys_domains, i);
                init_sched_groups_power(i, sd);
        }

#ifdef CONFIG_NUMA
        for (i = 0; i < MAX_NUMNODES; i++)
                init_numa_sched_groups_power(sched_group_nodes[i]);

        if (sd_allnodes) {
                struct sched_group *sg;

                cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg);
                init_numa_sched_groups_power(sg);
        }
#endif

        /* Attach the domains */
        for_each_cpu_mask(i, *cpu_map) {
                struct sched_domain *sd;
#ifdef CONFIG_SCHED_SMT
                sd = &per_cpu(cpu_domains, i);
#elif defined(CONFIG_SCHED_MC)
                sd = &per_cpu(core_domains, i);
#else
                sd = &per_cpu(phys_domains, i);
#endif
                cpu_attach_domain(sd, i);
        }
        /*
         * Tune cache-hot values:
         */
        calibrate_migration_costs(cpu_map);

        return 0;

#ifdef CONFIG_NUMA
error:
        free_sched_groups(cpu_map);
        return -ENOMEM;
#endif
}
/*
 * Set up scheduler domains and groups. Callers must hold the hotplug lock.
 */
static int arch_init_sched_domains(const cpumask_t *cpu_map)
{
        cpumask_t cpu_default_map;
        int err;

        /*
         * Set up the mask of cpus without special-case scheduling
         * requirements. For now this just excludes isolated cpus, but could
         * be used to exclude other special cases in the future.
         */
        cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);

        err = build_sched_domains(&cpu_default_map);

        return err;
}

static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
{
        free_sched_groups(cpu_map);
}

/*
 * Detach sched domains from a group of cpus specified in cpu_map.
 * These cpus will now be attached to the NULL domain.
 */
static void detach_destroy_domains(const cpumask_t *cpu_map)
{
        int i;

        for_each_cpu_mask(i, *cpu_map)
                cpu_attach_domain(NULL, i);
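        /*
         * Wait for any load balancing that is still running on another cpu
         * to finish before the old domains and groups are freed below.
         */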
        synchronize_sched();
        arch_destroy_sched_domains(cpu_map);
}

/*
 * Partition sched domains as specified by the cpumasks below.
 * This attaches all cpus from the cpumasks to the NULL domain,
 * waits for an RCU quiescent period, recalculates sched
 * domain information and then attaches them back to the
 * correct sched domains.
 * Call with the hotplug lock held.
 */
int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
{
        cpumask_t change_map;
        int err = 0;

        cpus_and(*partition1, *partition1, cpu_online_map);
        cpus_and(*partition2, *partition2, cpu_online_map);
        cpus_or(change_map, *partition1, *partition2);

        /* Detach sched domains from all of the affected cpus */
        detach_destroy_domains(&change_map);
        if (!cpus_empty(*partition1))
                err = build_sched_domains(partition1);
        if (!err && !cpus_empty(*partition2))
                err = build_sched_domains(partition2);

        return err;
}

#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
int arch_reinit_sched_domains(void)
{
        int err;

        lock_cpu_hotplug();
        detach_destroy_domains(&cpu_online_map);
        err = arch_init_sched_domains(&cpu_online_map);
        unlock_cpu_hotplug();

        return err;
}

static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
{
        int ret;

        if (buf[0] != '0' && buf[0] != '1')
                return -EINVAL;

        if (smt)
                sched_smt_power_savings = (buf[0] == '1');
        else
                sched_mc_power_savings = (buf[0] == '1');
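        /*
         * The power-savings settings are consulted when the domains are
         * built, so the whole hierarchy has to be rebuilt for the new value
         * to take effect.
         */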
        ret = arch_reinit_sched_domains();

        return ret ? ret : count;
}

int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
{
        int err = 0;

#ifdef CONFIG_SCHED_SMT
        if (smt_capable())
                err = sysfs_create_file(&cls->kset.kobj,
                                        &attr_sched_smt_power_savings.attr);
#endif
#ifdef CONFIG_SCHED_MC
        if (!err && mc_capable())
                err = sysfs_create_file(&cls->kset.kobj,
                                        &attr_sched_mc_power_savings.attr);
#endif
        return err;
}
#endif

#ifdef CONFIG_SCHED_MC
static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
{
        return sprintf(page, "%u\n", sched_mc_power_savings);
}

static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
                                            const char *buf, size_t count)
{
        return sched_power_savings_store(buf, count, 0);
}

SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
            sched_mc_power_savings_store);
#endif

#ifdef CONFIG_SCHED_SMT
static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
{
        return sprintf(page, "%u\n", sched_smt_power_savings);
}

static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
                                             const char *buf, size_t count)
{
        return sched_power_savings_store(buf, count, 1);
}

SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
            sched_smt_power_savings_store);
#endif
/*
 * Force a reinitialization of the sched domains hierarchy. The domains
 * and groups cannot be updated in place without racing with the balancing
 * code, so we temporarily attach all running cpus to the NULL domain
 * which will prevent rebalancing while the sched domains are recalculated.
 */
static int update_sched_domains(struct notifier_block *nfb,
                                unsigned long action, void *hcpu)
{
        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_DOWN_PREPARE:
                detach_destroy_domains(&cpu_online_map);
                return NOTIFY_OK;

        case CPU_UP_CANCELED:
        case CPU_DOWN_FAILED:
        case CPU_ONLINE:
        case CPU_DEAD:
                /*
                 * Fall through and re-initialise the domains.
                 */
                break;
        default:
                return NOTIFY_DONE;
        }

        /* The hotplug lock is already held by cpu_up/cpu_down */
        arch_init_sched_domains(&cpu_online_map);

        return NOTIFY_OK;
}

void __init sched_init_smp(void)
{
        cpumask_t non_isolated_cpus;

        lock_cpu_hotplug();
        arch_init_sched_domains(&cpu_online_map);
        cpus_andnot(non_isolated_cpus, cpu_online_map, cpu_isolated_map);
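        /*
         * If every online cpu is isolated, keep the current cpu in the mask
         * so that init still has somewhere to run.
         */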
        if (cpus_empty(non_isolated_cpus))
                cpu_set(smp_processor_id(), non_isolated_cpus);
        unlock_cpu_hotplug();

        /* XXX: Theoretical race here - CPU may be hotplugged now */
        hotcpu_notifier(update_sched_domains, 0);

        /* Move init over to a non-isolated CPU */
        if (set_cpus_allowed(current, non_isolated_cpus) < 0)
                BUG();
}
#else
void __init sched_init_smp(void)
{
}
#endif /* CONFIG_SMP */

int in_sched_functions(unsigned long addr)
{
        /* Linker adds these: start and end of __sched functions */
        extern char __sched_text_start[], __sched_text_end[];

        return in_lock_functions(addr) ||
                (addr >= (unsigned long)__sched_text_start
                 && addr < (unsigned long)__sched_text_end);
}

void __init sched_init(void)
{
        int i, j, k;

        for_each_possible_cpu(i) {
                struct prio_array *array;
                struct rq *rq;

                rq = cpu_rq(i);
                spin_lock_init(&rq->lock);
                lockdep_set_class(&rq->lock, &rq->rq_lock_key);
                rq->nr_running = 0;
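                /*
                 * Each runqueue has two priority arrays: tasks whose
                 * timeslice runs out move from the active to the expired
                 * array, and the two are swapped once the active array
                 * becomes empty.
                 */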
                rq->active = rq->arrays;
                rq->expired = rq->arrays + 1;
                rq->best_expired_prio = MAX_PRIO;

#ifdef CONFIG_SMP
                rq->sd = NULL;
                for (j = 1; j < 3; j++)
                        rq->cpu_load[j] = 0;
                rq->active_balance = 0;
                rq->push_cpu = 0;
                rq->cpu = i;
                rq->migration_thread = NULL;
                INIT_LIST_HEAD(&rq->migration_queue);
#endif
                atomic_set(&rq->nr_iowait, 0);
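                /*
                 * Start every priority list in both arrays out empty and
                 * clear the bitmap bits; the extra bit at MAX_PRIO is then
                 * set as a sentinel so bitmap searches always terminate.
                 */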
                for (j = 0; j < 2; j++) {
                        array = rq->arrays + j;
                        for (k = 0; k < MAX_PRIO; k++) {
                                INIT_LIST_HEAD(array->queue + k);
                                __clear_bit(k, array->bitmap);
                        }
                        // delimiter for bitsearch
                        __set_bit(MAX_PRIO, array->bitmap);
                }
        }

        set_load_weight(&init_task);

#ifdef CONFIG_SMP
        open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
#endif

#ifdef CONFIG_RT_MUTEXES
        plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
#endif

        /*
         * The boot idle thread does lazy MMU switching as well:
         */
        atomic_inc(&init_mm.mm_count);
        enter_lazy_tlb(&init_mm, current);

        /*
         * Make us the idle thread. Technically, schedule() should not be
         * called from this thread, however somewhere below it might be,
         * but because we are the idle thread, we just pick up running again
         * when this runqueue becomes "idle".
         */
        init_idle(current, smp_processor_id());
}
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
void __might_sleep(char *file, int line)
{
#ifdef in_atomic
        static unsigned long prev_jiffy;        /* ratelimiting */

        if ((in_atomic() || irqs_disabled()) &&
            system_state == SYSTEM_RUNNING && !oops_in_progress) {
                if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
                        return;
                prev_jiffy = jiffies;
                printk(KERN_ERR "BUG: sleeping function called from invalid"
                                " context at %s:%d\n", file, line);
                printk("in_atomic():%d, irqs_disabled():%d\n",
                        in_atomic(), irqs_disabled());
                debug_show_held_locks(current);
                dump_stack();
        }
#endif
}
EXPORT_SYMBOL(__might_sleep);
#endif
#ifdef CONFIG_MAGIC_SYSRQ
void normalize_rt_tasks(void)
{
        struct prio_array *array;
        struct task_struct *p;
        unsigned long flags;
        struct rq *rq;

        read_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (!rt_task(p))
                        continue;
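                /*
                 * Take pi_lock and the runqueue lock in the usual order,
                 * dequeue the task if it is queued, demote it to
                 * SCHED_NORMAL and requeue it so the change takes effect
                 * immediately.
                 */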
                spin_lock_irqsave(&p->pi_lock, flags);
                rq = __task_rq_lock(p);

                array = p->array;
                if (array)
                        deactivate_task(p, task_rq(p));
                __setscheduler(p, SCHED_NORMAL, 0);
                if (array) {
                        __activate_task(p, task_rq(p));
                        resched_task(rq->curr);
                }

                __task_rq_unlock(rq);
                spin_unlock_irqrestore(&p->pi_lock, flags);
        }
        read_unlock_irq(&tasklist_lock);
}
#endif /* CONFIG_MAGIC_SYSRQ */
#ifdef CONFIG_IA64
/*
 * These functions are only useful for the IA64 MCA handling.
 *
 * They can only be called when the whole system has been
 * stopped - every CPU needs to be quiescent, and no scheduling
 * activity can take place. Using them for anything else would
 * be a serious bug, and as a result, they aren't even visible
 * under any other configuration.
 */

/**
 * curr_task - return the current task for a given cpu.
 * @cpu: the processor in question.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
struct task_struct *curr_task(int cpu)
{
        return cpu_curr(cpu);
}

/**
 * set_curr_task - set the current task for a given cpu.
 * @cpu: the processor in question.
 * @p: the task pointer to set.
 *
 * Description: This function must only be used when non-maskable interrupts
 * are serviced on a separate stack. It allows the architecture to switch the
 * notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled; the
 * caller must save the original value of the current task (see curr_task()
 * above) and restore that value before re-enabling interrupts and restarting
 * the system.
 *
 * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
 */
void set_curr_task(int cpu, struct task_struct *p)
{
        cpu_curr(cpu) = p;
}
#endif