sched.c

/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>

#include <asm/tlb.h>

/*
 * Scheduler clock - returns current time in nanosec units.
 * This is the default implementation.
 * Architectures and sub-architectures can override this.
 */
unsigned long long __attribute__((weak)) sched_clock(void)
{
        return (unsigned long long)jiffies * (1000000000 / HZ);
}
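/*
 * E.g. with HZ == 250 a jiffy is 4,000,000 ns, so this fallback clock
 * only advances in 4 ms steps; architectures with a fine-grained cycle
 * counter (TSC etc.) are expected to override it with something better.
 */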
/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)

/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters,
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
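/*
 * Worked example (assuming the usual MAX_RT_PRIO == 100 and
 * MAX_PRIO == 140 from <linux/sched.h>): NICE_TO_PRIO(-20) == 100,
 * NICE_TO_PRIO(0) == 120 and NICE_TO_PRIO(19) == 139; USER_PRIO() maps
 * those static priorities to 0, 20 and 39, and MAX_USER_PRIO == 40.
 */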
/*
 * Some helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)     ((TIME) / (1000000000 / HZ))
#define JIFFIES_TO_NS(TIME)     ((TIME) * (1000000000 / HZ))

#define NICE_0_LOAD             SCHED_LOAD_SCALE
#define NICE_0_SHIFT            SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
 * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 * Timeslices get refilled after they expire.
 */
#define MIN_TIMESLICE           max(5 * HZ / 1000, 1)
#define DEF_TIMESLICE           (100 * HZ / 1000)

#ifdef CONFIG_SMP
/*
 * Divide a load by a sched group cpu_power : (load / sg->__cpu_power)
 * Since cpu_power is a 'constant', we can use a reciprocal divide.
 */
static inline u32 sg_div_cpu_power(const struct sched_group *sg, u32 load)
{
        return reciprocal_divide(load, sg->reciprocal_cpu_power);
}

/*
 * Each time a sched group cpu_power is changed,
 * we must compute its reciprocal value
 */
static inline void sg_inc_cpu_power(struct sched_group *sg, u32 val)
{
        sg->__cpu_power += val;
        sg->reciprocal_cpu_power = reciprocal_value(sg->__cpu_power);
}
#endif
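/*
 * Rough sketch of the reciprocal trick (see <linux/reciprocal_div.h>):
 * reciprocal_value(k) stores approximately 2^32/k, and
 * reciprocal_divide(a, r) computes ((u64)a * r) >> 32.  For
 * __cpu_power == 1024 the stored reciprocal is ~4194304, so dividing a
 * load of 2048 yields (2048 * 4194304) >> 32 == 2 - no divide needed.
 */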
#define SCALE_PRIO(x, prio) \
        max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_TIMESLICE)

/*
 * static_prio_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
 * to time slice values: [800ms ... 100ms ... 5ms]
 */
static unsigned int static_prio_timeslice(int static_prio)
{
        if (static_prio == NICE_TO_PRIO(19))
                return 1;

        if (static_prio < NICE_TO_PRIO(0))
                return SCALE_PRIO(DEF_TIMESLICE * 4, static_prio);
        else
                return SCALE_PRIO(DEF_TIMESLICE, static_prio);
}
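/*
 * Worked example with HZ == 1000 (DEF_TIMESLICE == 100 jiffies,
 * MIN_TIMESLICE == 5) and the priority constants assumed above:
 * nice -20 (prio 100) gets 400 * (140 - 100) / 20 == 800 jiffies,
 * nice 0 (prio 120) gets 100 * (140 - 120) / 20 == 100 jiffies, and
 * SCALE_PRIO() bottoms out at the 5-jiffy MIN_TIMESLICE for nice 19,
 * which is special-cased above to a single jiffy.
 */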
static inline int rt_policy(int policy)
{
        if (unlikely(policy == SCHED_FIFO) || unlikely(policy == SCHED_RR))
                return 1;
        return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
        return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
        DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
        struct list_head queue[MAX_RT_PRIO];
};

struct load_stat {
        struct load_weight load;
        u64 load_update_start, load_update_last;
        unsigned long delta_fair, delta_exec, delta_stat;
};

/* CFS-related fields in a runqueue */
struct cfs_rq {
        struct load_weight load;
        unsigned long nr_running;

        s64 fair_clock;
        u64 exec_clock;
        s64 wait_runtime;
        u64 sleeper_bonus;
        unsigned long wait_runtime_overruns, wait_runtime_underruns;

        struct rb_root tasks_timeline;
        struct rb_node *rb_leftmost;
        struct rb_node *rb_load_balance_curr;
#ifdef CONFIG_FAIR_GROUP_SCHED
        /* 'curr' points to currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e. when none are currently running).
         */
        struct sched_entity *curr;
        struct rq *rq;  /* cpu runqueue to which this cfs_rq is attached */

        /* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
         * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
         * (like users, containers etc.)
         *
         * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
         * list is used during load balance.
         */
        struct list_head leaf_cfs_rq_list; /* Better name: task_cfs_rq_list? */
#endif
};

/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
        struct rt_prio_array active;
        int rt_load_balance_idx;
        struct list_head *rt_load_balance_head, *rt_load_balance_curr;
};

/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: those places that want to lock multiple runqueues
 * (such as the load balancing or the thread migration code) must
 * acquire the locks in ascending &runqueue order.
 */
struct rq {
        spinlock_t lock;        /* runqueue lock */

        /*
         * nr_running and cpu_load should be in the same cacheline because
         * remote CPUs use both these fields when doing load calculation.
         */
        unsigned long nr_running;
#define CPU_LOAD_IDX_MAX 5
        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
        unsigned char idle_at_tick;
#ifdef CONFIG_NO_HZ
        unsigned char in_nohz_recently;
#endif
        struct load_stat ls;    /* capture load from *all* tasks on this cpu */
        unsigned long nr_load_updates;
        u64 nr_switches;

        struct cfs_rq cfs;
#ifdef CONFIG_FAIR_GROUP_SCHED
        struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */
#endif
        struct rt_rq rt;

        /*
         * This is part of a global counter where only the total sum
         * over all CPUs matters. A task can increase this counter on
         * one CPU and if it got migrated afterwards it may decrease
         * it on another CPU. Always updated under the runqueue lock:
         */
        unsigned long nr_uninterruptible;

        struct task_struct *curr, *idle;
        unsigned long next_balance;
        struct mm_struct *prev_mm;

        u64 clock, prev_clock_raw;
        s64 clock_max_delta;

        unsigned int clock_warps, clock_overflows;
        unsigned int clock_unstable_events;

        atomic_t nr_iowait;

#ifdef CONFIG_SMP
        struct sched_domain *sd;

        /* For active balancing */
        int active_balance;
        int push_cpu;
        int cpu;                /* cpu of this runqueue */

        struct task_struct *migration_thread;
        struct list_head migration_queue;
#endif

#ifdef CONFIG_SCHEDSTATS
        /* latency stats */
        struct sched_info rq_sched_info;

        /* sys_sched_yield() stats */
        unsigned long yld_exp_empty;
        unsigned long yld_act_empty;
        unsigned long yld_both_empty;
        unsigned long yld_cnt;

        /* schedule() stats */
        unsigned long sched_switch;
        unsigned long sched_cnt;
        unsigned long sched_goidle;

        /* try_to_wake_up() stats */
        unsigned long ttwu_cnt;
        unsigned long ttwu_local;
#endif
        struct lock_class_key rq_lock_key;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
static DEFINE_MUTEX(sched_hotcpu_mutex);

static inline void check_preempt_curr(struct rq *rq, struct task_struct *p)
{
        rq->curr->sched_class->check_preempt_curr(rq, p);
}

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
        return rq->cpu;
#else
        return 0;
#endif
}

/*
 * Per-runqueue clock, as fine-grained as the platform can give us:
 */
static unsigned long long __rq_clock(struct rq *rq)
{
        u64 prev_raw = rq->prev_clock_raw;
        u64 now = sched_clock();
        s64 delta = now - prev_raw;
        u64 clock = rq->clock;

        /*
         * Protect against sched_clock() occasionally going backwards:
         */
        if (unlikely(delta < 0)) {
                clock++;
                rq->clock_warps++;
        } else {
                /*
                 * Catch too large forward jumps too:
                 */
                if (unlikely(delta > 2*TICK_NSEC)) {
                        clock++;
                        rq->clock_overflows++;
                } else {
                        if (unlikely(delta > rq->clock_max_delta))
                                rq->clock_max_delta = delta;
                        clock += delta;
                }
        }

        rq->prev_clock_raw = now;
        rq->clock = clock;

        return clock;
}
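/*
 * Put differently: a backwards sched_clock() step advances the rq clock
 * by just 1 ns (counted in clock_warps), a forward jump of more than two
 * ticks (roughly > 2 ms with HZ == 1000, since TICK_NSEC is about
 * 10^9/HZ) is clamped the same way and counted in clock_overflows, and
 * only "sane" deltas are credited in full.
 */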
static inline unsigned long long rq_clock(struct rq *rq)
{
        int this_cpu = smp_processor_id();

        if (this_cpu == cpu_of(rq))
                return __rq_clock(rq);

        return rq->clock;
}

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
        for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
#define this_rq()               (&__get_cpu_var(runqueues))
#define task_rq(p)              cpu_rq(task_cpu(p))
#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)

/*
 * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
 * clock constructed from sched_clock():
 */
unsigned long long cpu_clock(int cpu)
{
        unsigned long long now;
        unsigned long flags;

        local_irq_save(flags);
        now = rq_clock(cpu_rq(cpu));
        local_irq_restore(flags);

        return now;
}

#ifdef CONFIG_FAIR_GROUP_SCHED
/* Change a task's ->cfs_rq if it moves across CPUs */
static inline void set_task_cfs_rq(struct task_struct *p)
{
        p->se.cfs_rq = &task_rq(p)->cfs;
}
#else
static inline void set_task_cfs_rq(struct task_struct *p)
{
}
#endif

#ifndef prepare_arch_switch
# define prepare_arch_switch(next)      do { } while (0)
#endif
#ifndef finish_arch_switch
# define finish_arch_switch(prev)       do { } while (0)
#endif

#ifndef __ARCH_WANT_UNLOCKED_CTXSW
static inline int task_running(struct rq *rq, struct task_struct *p)
{
        return rq->curr == p;
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_DEBUG_SPINLOCK
        /* this is a valid case when another task releases the spinlock */
        rq->lock.owner = current;
#endif
        /*
         * If we are tracking spinlock dependencies then we have to
         * fix up the runqueue lock - which gets 'carried over' from
         * prev into current:
         */
        spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);

        spin_unlock_irq(&rq->lock);
}

#else /* __ARCH_WANT_UNLOCKED_CTXSW */
static inline int task_running(struct rq *rq, struct task_struct *p)
{
#ifdef CONFIG_SMP
        return p->oncpu;
#else
        return rq->curr == p;
#endif
}

static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
{
#ifdef CONFIG_SMP
        /*
         * We can optimise this out completely for !SMP, because the
         * SMP rebalancing from interrupt is the only thing that cares
         * here.
         */
        next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        spin_unlock_irq(&rq->lock);
#else
        spin_unlock(&rq->lock);
#endif
}

static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
{
#ifdef CONFIG_SMP
        /*
         * After ->oncpu is cleared, the task can be moved to a different CPU.
         * We must ensure this doesn't happen until the switch is completely
         * finished.
         */
        smp_wmb();
        prev->oncpu = 0;
#endif
#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
        local_irq_enable();
#endif
}
#endif /* __ARCH_WANT_UNLOCKED_CTXSW */

/*
 * __task_rq_lock - lock the runqueue a given task resides on.
 * Must be called with interrupts disabled.
 */
static inline struct rq *__task_rq_lock(struct task_struct *p)
        __acquires(rq->lock)
{
        struct rq *rq;

repeat_lock_task:
        rq = task_rq(p);
        spin_lock(&rq->lock);
        if (unlikely(rq != task_rq(p))) {
                spin_unlock(&rq->lock);
                goto repeat_lock_task;
        }
        return rq;
}

/*
 * task_rq_lock - lock the runqueue a given task resides on and disable
 * interrupts. Note the ordering: we can safely lookup the task_rq without
 * explicitly disabling preemption.
 */
static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
        __acquires(rq->lock)
{
        struct rq *rq;

repeat_lock_task:
        local_irq_save(*flags);
        rq = task_rq(p);
        spin_lock(&rq->lock);
        if (unlikely(rq != task_rq(p))) {
                spin_unlock_irqrestore(&rq->lock, *flags);
                goto repeat_lock_task;
        }
        return rq;
}

static inline void __task_rq_unlock(struct rq *rq)
        __releases(rq->lock)
{
        spin_unlock(&rq->lock);
}

static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
        __releases(rq->lock)
{
        spin_unlock_irqrestore(&rq->lock, *flags);
}

/*
 * this_rq_lock - lock this runqueue and disable interrupts.
 */
static inline struct rq *this_rq_lock(void)
        __acquires(rq->lock)
{
        struct rq *rq;

        local_irq_disable();
        rq = this_rq();
        spin_lock(&rq->lock);

        return rq;
}

/*
 * CPU frequency is/was unstable - start anew by setting prev_clock_raw:
 */
void sched_clock_unstable_event(void)
{
        unsigned long flags;
        struct rq *rq;

        rq = task_rq_lock(current, &flags);
        rq->prev_clock_raw = sched_clock();
        rq->clock_unstable_events++;
        task_rq_unlock(rq, &flags);
}

/*
 * resched_task - mark a task 'to be rescheduled now'.
 *
 * On UP this means the setting of the need_resched flag, on SMP it
 * might also involve a cross-CPU call to trigger the scheduler on
 * the target CPU.
 */
#ifdef CONFIG_SMP

#ifndef tsk_is_polling
#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
#endif

static void resched_task(struct task_struct *p)
{
        int cpu;

        assert_spin_locked(&task_rq(p)->lock);

        if (unlikely(test_tsk_thread_flag(p, TIF_NEED_RESCHED)))
                return;

        set_tsk_thread_flag(p, TIF_NEED_RESCHED);

        cpu = task_cpu(p);
        if (cpu == smp_processor_id())
                return;

        /* NEED_RESCHED must be visible before we test polling */
        smp_mb();
        if (!tsk_is_polling(p))
                smp_send_reschedule(cpu);
}

static void resched_cpu(int cpu)
{
        struct rq *rq = cpu_rq(cpu);
        unsigned long flags;

        if (!spin_trylock_irqsave(&rq->lock, flags))
                return;
        resched_task(cpu_curr(cpu));
        spin_unlock_irqrestore(&rq->lock, flags);
}
#else
static inline void resched_task(struct task_struct *p)
{
        assert_spin_locked(&task_rq(p)->lock);
        set_tsk_need_resched(p);
}
#endif

static u64 div64_likely32(u64 divident, unsigned long divisor)
{
#if BITS_PER_LONG == 32
        if (likely(divident <= 0xffffffffULL))
                return (u32)divident / divisor;
        do_div(divident, divisor);

        return divident;
#else
        return divident / divisor;
#endif
}

#if BITS_PER_LONG == 32
# define WMULT_CONST    (~0UL)
#else
# define WMULT_CONST    (1UL << 32)
#endif

#define WMULT_SHIFT     32

static inline unsigned long
calc_delta_mine(unsigned long delta_exec, unsigned long weight,
                struct load_weight *lw)
{
        u64 tmp;

        if (unlikely(!lw->inv_weight))
                lw->inv_weight = WMULT_CONST / lw->weight;

        tmp = (u64)delta_exec * weight;
        /*
         * Check whether we'd overflow the 64-bit multiplication:
         */
        if (unlikely(tmp > WMULT_CONST)) {
                tmp = ((tmp >> WMULT_SHIFT/2) * lw->inv_weight)
                                >> (WMULT_SHIFT/2);
        } else {
                tmp = (tmp * lw->inv_weight) >> WMULT_SHIFT;
        }

        return (unsigned long)min(tmp, (u64)sysctl_sched_runtime_limit);
}
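/*
 * Example: with delta_exec == 2,000,000 ns, weight == NICE_0_LOAD (1024)
 * and lw->weight == 2048 (say, two runnable nice-0 tasks), inv_weight
 * becomes 2^32/2048 and the result is 2,000,000 * 1024 / 2048 ==
 * 1,000,000 ns - the weighted delta is half the wall-clock delta,
 * subject to the sysctl_sched_runtime_limit clamp above.
 */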
static inline unsigned long
calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
{
        return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
}

static void update_load_add(struct load_weight *lw, unsigned long inc)
{
        lw->weight += inc;
        lw->inv_weight = 0;
}

static void update_load_sub(struct load_weight *lw, unsigned long dec)
{
        lw->weight -= dec;
        lw->inv_weight = 0;
}

static void __update_curr_load(struct rq *rq, struct load_stat *ls)
{
        if (rq->curr != rq->idle && ls->load.weight) {
                ls->delta_exec += ls->delta_stat;
                ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
                ls->delta_stat = 0;
        }
}

/*
 * Update delta_exec, delta_fair fields for rq.
 *
 * delta_fair clock advances at a rate inversely proportional to
 * total load (rq->ls.load.weight) on the runqueue, while
 * delta_exec advances at the same rate as wall-clock (provided
 * cpu is not idle).
 *
 * delta_exec / delta_fair is a measure of the (smoothed) load on this
 * runqueue over any given interval. This (smoothed) load is used
 * during load balance.
 *
 * This function is called /before/ updating rq->ls.load
 * and when switching tasks.
 */
static void update_curr_load(struct rq *rq, u64 now)
{
        struct load_stat *ls = &rq->ls;
        u64 start;

        start = ls->load_update_start;
        ls->load_update_start = now;
        ls->delta_stat += now - start;
        /*
         * Stagger updates to ls->delta_fair. Very frequent updates
         * can be expensive.
         */
        if (ls->delta_stat >= sysctl_sched_stat_granularity)
                __update_curr_load(rq, ls);
}

/*
 * To aid in avoiding the subversion of "niceness" due to uneven distribution
 * of tasks with abnormal "nice" values across CPUs the contribution that
 * each task makes to its run queue's load is weighted according to its
 * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
 * scaled version of the new time slice allocation that they receive on time
 * slice expiry etc.
 */

/*
 * Assume: static_prio_timeslice(NICE_TO_PRIO(0)) == DEF_TIMESLICE
 * If static_prio_timeslice() is ever changed to break this assumption then
 * this code will need modification
 */
#define TIME_SLICE_NICE_ZERO DEF_TIMESLICE
#define load_weight(lp) \
        (((lp) * SCHED_LOAD_SCALE) / TIME_SLICE_NICE_ZERO)
#define PRIO_TO_LOAD_WEIGHT(prio) \
        load_weight(static_prio_timeslice(prio))
#define RTPRIO_TO_LOAD_WEIGHT(rp) \
        (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + load_weight(rp))

#define WEIGHT_IDLEPRIO         2
#define WMULT_IDLEPRIO          (1 << 31)

/*
 * Nice levels are multiplicative, with a gentle 10% change for every
 * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
 * nice 1, it will get ~10% less CPU time than another CPU-bound task
 * that remained on nice 0.
 *
 * The "10% effect" is relative and cumulative: from _any_ nice level,
 * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
 * it's +10% CPU usage. (to achieve that we use a multiplier of 1.25.
 * If a task goes up by ~10% and another task goes down by ~10% then
 * the relative distance between them is ~25%.)
 */
static const int prio_to_weight[40] = {
/* -20 */ 88818, 71054, 56843, 45475, 36380, 29104, 23283, 18626, 14901, 11921,
/* -10 */  9537,  7629,  6103,  4883,  3906,  3125,  2500,  2000,  1600,  1280,
/*   0 */ NICE_0_LOAD /* 1024 */,
/*   1 */   819,   655,   524,   419,   336,   268,   215,   172,   137,
/*  10 */   110,    87,    70,    56,    45,    36,    29,    23,    18,    15,
};

/*
 * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
 *
 * In cases where the weight does not change often, we can use the
 * precalculated inverse to speed up arithmetic by turning divisions
 * into multiplications:
 */
static const u32 prio_to_wmult[40] = {
/* -20 */     48356,     60446,     75558,     94446,    118058,
/* -15 */    147573,    184467,    230589,    288233,    360285,
/* -10 */    450347,    562979,    703746,    879575,   1099582,
/*  -5 */   1374389,   1717986,   2147483,   2684354,   3355443,
/*   0 */   4194304,   5244160,   6557201,   8196502,  10250518,
/*   5 */  12782640,  16025997,  19976592,  24970740,  31350126,
/*  10 */  39045157,  49367440,  61356675,  76695844,  95443717,
/*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
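/*
 * Cross-check of the two tables: each nice step scales the weight by
 * about 1.25 (1024 -> 819 for nice 1, 1024 -> 1280 for nice -1), which
 * is what yields the ~10% CPU-share change per nice level described
 * above, and each prio_to_wmult[] entry is just 2^32 divided by the
 * corresponding weight (e.g. 2^32 / 1024 == 4194304 for nice 0).
 */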
  681. static inline void
  682. inc_load(struct rq *rq, const struct task_struct *p, u64 now)
  683. {
  684. update_curr_load(rq, now);
  685. update_load_add(&rq->ls.load, p->se.load.weight);
  686. }
  687. static inline void
  688. dec_load(struct rq *rq, const struct task_struct *p, u64 now)
  689. {
  690. update_curr_load(rq, now);
  691. update_load_sub(&rq->ls.load, p->se.load.weight);
  692. }
  693. static inline void inc_nr_running(struct task_struct *p, struct rq *rq, u64 now)
  694. {
  695. rq->nr_running++;
  696. inc_load(rq, p, now);
  697. }
  698. static inline void dec_nr_running(struct task_struct *p, struct rq *rq, u64 now)
  699. {
  700. rq->nr_running--;
  701. dec_load(rq, p, now);
  702. }
  703. static void activate_task(struct rq *rq, struct task_struct *p, int wakeup);
  704. /*
  705. * runqueue iterator, to support SMP load-balancing between different
  706. * scheduling classes, without having to expose their internal data
  707. * structures to the load-balancing proper:
  708. */
  709. struct rq_iterator {
  710. void *arg;
  711. struct task_struct *(*start)(void *);
  712. struct task_struct *(*next)(void *);
  713. };
  714. static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
  715. unsigned long max_nr_move, unsigned long max_load_move,
  716. struct sched_domain *sd, enum cpu_idle_type idle,
  717. int *all_pinned, unsigned long *load_moved,
  718. int this_best_prio, int best_prio, int best_prio_seen,
  719. struct rq_iterator *iterator);
  720. #include "sched_stats.h"
  721. #include "sched_rt.c"
  722. #include "sched_fair.c"
  723. #include "sched_idletask.c"
  724. #ifdef CONFIG_SCHED_DEBUG
  725. # include "sched_debug.c"
  726. #endif
  727. #define sched_class_highest (&rt_sched_class)
  728. static void set_load_weight(struct task_struct *p)
  729. {
  730. task_rq(p)->cfs.wait_runtime -= p->se.wait_runtime;
  731. p->se.wait_runtime = 0;
  732. if (task_has_rt_policy(p)) {
  733. p->se.load.weight = prio_to_weight[0] * 2;
  734. p->se.load.inv_weight = prio_to_wmult[0] >> 1;
  735. return;
  736. }
  737. /*
  738. * SCHED_IDLE tasks get minimal weight:
  739. */
  740. if (p->policy == SCHED_IDLE) {
  741. p->se.load.weight = WEIGHT_IDLEPRIO;
  742. p->se.load.inv_weight = WMULT_IDLEPRIO;
  743. return;
  744. }
  745. p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
  746. p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
  747. }
  748. static void
  749. enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, u64 now)
  750. {
  751. sched_info_queued(p);
  752. p->sched_class->enqueue_task(rq, p, wakeup, now);
  753. p->se.on_rq = 1;
  754. }
  755. static void
  756. dequeue_task(struct rq *rq, struct task_struct *p, int sleep, u64 now)
  757. {
  758. p->sched_class->dequeue_task(rq, p, sleep, now);
  759. p->se.on_rq = 0;
  760. }
  761. /*
  762. * __normal_prio - return the priority that is based on the static prio
  763. */
  764. static inline int __normal_prio(struct task_struct *p)
  765. {
  766. return p->static_prio;
  767. }
  768. /*
  769. * Calculate the expected normal priority: i.e. priority
  770. * without taking RT-inheritance into account. Might be
  771. * boosted by interactivity modifiers. Changes upon fork,
  772. * setprio syscalls, and whenever the interactivity
  773. * estimator recalculates.
  774. */
  775. static inline int normal_prio(struct task_struct *p)
  776. {
  777. int prio;
  778. if (task_has_rt_policy(p))
  779. prio = MAX_RT_PRIO-1 - p->rt_priority;
  780. else
  781. prio = __normal_prio(p);
  782. return prio;
  783. }
  784. /*
  785. * Calculate the current priority, i.e. the priority
  786. * taken into account by the scheduler. This value might
  787. * be boosted by RT tasks, or might be boosted by
  788. * interactivity modifiers. Will be RT if the task got
  789. * RT-boosted. If not then it returns p->normal_prio.
  790. */
  791. static int effective_prio(struct task_struct *p)
  792. {
  793. p->normal_prio = normal_prio(p);
  794. /*
  795. * If we are RT tasks or we were boosted to RT priority,
  796. * keep the priority unchanged. Otherwise, update priority
  797. * to the normal priority:
  798. */
  799. if (!rt_prio(p->prio))
  800. return p->normal_prio;
  801. return p->prio;
  802. }
  803. /*
  804. * activate_task - move a task to the runqueue.
  805. */
  806. static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  807. {
  808. u64 now = rq_clock(rq);
  809. if (p->state == TASK_UNINTERRUPTIBLE)
  810. rq->nr_uninterruptible--;
  811. enqueue_task(rq, p, wakeup, now);
  812. inc_nr_running(p, rq, now);
  813. }
  814. /*
  815. * activate_idle_task - move idle task to the _front_ of runqueue.
  816. */
  817. static inline void activate_idle_task(struct task_struct *p, struct rq *rq)
  818. {
  819. u64 now = rq_clock(rq);
  820. if (p->state == TASK_UNINTERRUPTIBLE)
  821. rq->nr_uninterruptible--;
  822. enqueue_task(rq, p, 0, now);
  823. inc_nr_running(p, rq, now);
  824. }
  825. /*
  826. * deactivate_task - remove a task from the runqueue.
  827. */
  828. static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
  829. {
  830. u64 now = rq_clock(rq);
  831. if (p->state == TASK_UNINTERRUPTIBLE)
  832. rq->nr_uninterruptible++;
  833. dequeue_task(rq, p, sleep, now);
  834. dec_nr_running(p, rq, now);
  835. }
  836. /**
  837. * task_curr - is this task currently executing on a CPU?
  838. * @p: the task in question.
  839. */
  840. inline int task_curr(const struct task_struct *p)
  841. {
  842. return cpu_curr(task_cpu(p)) == p;
  843. }
  844. /* Used instead of source_load when we know the type == 0 */
  845. unsigned long weighted_cpuload(const int cpu)
  846. {
  847. return cpu_rq(cpu)->ls.load.weight;
  848. }
  849. static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
  850. {
  851. #ifdef CONFIG_SMP
  852. task_thread_info(p)->cpu = cpu;
  853. set_task_cfs_rq(p);
  854. #endif
  855. }
  856. #ifdef CONFIG_SMP
  857. void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
  858. {
  859. int old_cpu = task_cpu(p);
  860. struct rq *old_rq = cpu_rq(old_cpu), *new_rq = cpu_rq(new_cpu);
  861. u64 clock_offset, fair_clock_offset;
  862. clock_offset = old_rq->clock - new_rq->clock;
  863. fair_clock_offset = old_rq->cfs.fair_clock -
  864. new_rq->cfs.fair_clock;
  865. if (p->se.wait_start)
  866. p->se.wait_start -= clock_offset;
  867. if (p->se.wait_start_fair)
  868. p->se.wait_start_fair -= fair_clock_offset;
  869. if (p->se.sleep_start)
  870. p->se.sleep_start -= clock_offset;
  871. if (p->se.block_start)
  872. p->se.block_start -= clock_offset;
  873. if (p->se.sleep_start_fair)
  874. p->se.sleep_start_fair -= fair_clock_offset;
  875. __set_task_cpu(p, new_cpu);
  876. }
  877. struct migration_req {
  878. struct list_head list;
  879. struct task_struct *task;
  880. int dest_cpu;
  881. struct completion done;
  882. };
  883. /*
  884. * The task's runqueue lock must be held.
  885. * Returns true if you have to wait for migration thread.
  886. */
  887. static int
  888. migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
  889. {
  890. struct rq *rq = task_rq(p);
  891. /*
  892. * If the task is not on a runqueue (and not running), then
  893. * it is sufficient to simply update the task's cpu field.
  894. */
  895. if (!p->se.on_rq && !task_running(rq, p)) {
  896. set_task_cpu(p, dest_cpu);
  897. return 0;
  898. }
  899. init_completion(&req->done);
  900. req->task = p;
  901. req->dest_cpu = dest_cpu;
  902. list_add(&req->list, &rq->migration_queue);
  903. return 1;
  904. }
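/*
 * Typical call pattern (sketch; sched_migrate_task() further down follows
 * it): the caller takes the task's runqueue lock, calls migrate_task(),
 * and if it returns 1, drops the lock, wakes rq->migration_thread and
 * then waits on req->done for the migration thread to complete the move.
 */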
  905. /*
  906. * wait_task_inactive - wait for a thread to unschedule.
  907. *
  908. * The caller must ensure that the task *will* unschedule sometime soon,
  909. * else this function might spin for a *long* time. This function can't
  910. * be called with interrupts off, or it may introduce deadlock with
  911. * smp_call_function() if an IPI is sent by the same process we are
  912. * waiting to become inactive.
  913. */
  914. void wait_task_inactive(struct task_struct *p)
  915. {
  916. unsigned long flags;
  917. int running, on_rq;
  918. struct rq *rq;
  919. repeat:
  920. /*
  921. * We do the initial early heuristics without holding
  922. * any task-queue locks at all. We'll only try to get
  923. * the runqueue lock when things look like they will
  924. * work out!
  925. */
  926. rq = task_rq(p);
  927. /*
  928. * If the task is actively running on another CPU
  929. * still, just relax and busy-wait without holding
  930. * any locks.
  931. *
  932. * NOTE! Since we don't hold any locks, it's not
933. * even certain that "rq" stays as the right runqueue!
  934. * But we don't care, since "task_running()" will
  935. * return false if the runqueue has changed and p
  936. * is actually now running somewhere else!
  937. */
  938. while (task_running(rq, p))
  939. cpu_relax();
  940. /*
  941. * Ok, time to look more closely! We need the rq
  942. * lock now, to be *sure*. If we're wrong, we'll
  943. * just go back and repeat.
  944. */
  945. rq = task_rq_lock(p, &flags);
  946. running = task_running(rq, p);
  947. on_rq = p->se.on_rq;
  948. task_rq_unlock(rq, &flags);
  949. /*
  950. * Was it really running after all now that we
  951. * checked with the proper locks actually held?
  952. *
  953. * Oops. Go back and try again..
  954. */
  955. if (unlikely(running)) {
  956. cpu_relax();
  957. goto repeat;
  958. }
  959. /*
  960. * It's not enough that it's not actively running,
  961. * it must be off the runqueue _entirely_, and not
  962. * preempted!
  963. *
964. * So if it was still runnable (but just not actively
  965. * running right now), it's preempted, and we should
  966. * yield - it could be a while.
  967. */
  968. if (unlikely(on_rq)) {
  969. yield();
  970. goto repeat;
  971. }
  972. /*
  973. * Ahh, all good. It wasn't running, and it wasn't
  974. * runnable, which means that it will never become
  975. * running in the future either. We're all done!
  976. */
  977. }
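/*
 * A typical caller (sketch): ptrace-style code that has just stopped a
 * child uses wait_task_inactive(child) so the child is guaranteed to be
 * fully off its runqueue before its register state is inspected.
 */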
  978. /***
  979. * kick_process - kick a running thread to enter/exit the kernel
  980. * @p: the to-be-kicked thread
  981. *
  982. * Cause a process which is running on another CPU to enter
  983. * kernel-mode, without any delay. (to get signals handled.)
  984. *
985. * NOTE: this function doesn't have to take the runqueue lock,
  986. * because all it wants to ensure is that the remote task enters
  987. * the kernel. If the IPI races and the task has been migrated
  988. * to another CPU then no harm is done and the purpose has been
  989. * achieved as well.
  990. */
  991. void kick_process(struct task_struct *p)
  992. {
  993. int cpu;
  994. preempt_disable();
  995. cpu = task_cpu(p);
  996. if ((cpu != smp_processor_id()) && task_curr(p))
  997. smp_send_reschedule(cpu);
  998. preempt_enable();
  999. }
  1000. /*
  1001. * Return a low guess at the load of a migration-source cpu weighted
  1002. * according to the scheduling class and "nice" value.
  1003. *
  1004. * We want to under-estimate the load of migration sources, to
  1005. * balance conservatively.
  1006. */
  1007. static inline unsigned long source_load(int cpu, int type)
  1008. {
  1009. struct rq *rq = cpu_rq(cpu);
  1010. unsigned long total = weighted_cpuload(cpu);
  1011. if (type == 0)
  1012. return total;
  1013. return min(rq->cpu_load[type-1], total);
  1014. }
  1015. /*
  1016. * Return a high guess at the load of a migration-target cpu weighted
  1017. * according to the scheduling class and "nice" value.
  1018. */
  1019. static inline unsigned long target_load(int cpu, int type)
  1020. {
  1021. struct rq *rq = cpu_rq(cpu);
  1022. unsigned long total = weighted_cpuload(cpu);
  1023. if (type == 0)
  1024. return total;
  1025. return max(rq->cpu_load[type-1], total);
  1026. }
  1027. /*
  1028. * Return the average load per task on the cpu's run queue
  1029. */
  1030. static inline unsigned long cpu_avg_load_per_task(int cpu)
  1031. {
  1032. struct rq *rq = cpu_rq(cpu);
  1033. unsigned long total = weighted_cpuload(cpu);
  1034. unsigned long n = rq->nr_running;
  1035. return n ? total / n : SCHED_LOAD_SCALE;
  1036. }
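/*
 * Rough numerical example (assuming a nice-0 weight of SCHED_LOAD_SCALE,
 * i.e. 1024): a CPU running two nice-0 tasks has weighted_cpuload() ==
 * 2048. With a decayed cpu_load[] entry of 1536, source_load() returns
 * min(1536, 2048) == 1536 (a low guess) while target_load() returns
 * max(1536, 2048) == 2048 (a high guess), and cpu_avg_load_per_task()
 * returns 2048 / 2 == 1024.
 */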
  1037. /*
  1038. * find_idlest_group finds and returns the least busy CPU group within the
  1039. * domain.
  1040. */
  1041. static struct sched_group *
  1042. find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
  1043. {
  1044. struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
  1045. unsigned long min_load = ULONG_MAX, this_load = 0;
  1046. int load_idx = sd->forkexec_idx;
  1047. int imbalance = 100 + (sd->imbalance_pct-100)/2;
  1048. do {
  1049. unsigned long load, avg_load;
  1050. int local_group;
  1051. int i;
  1052. /* Skip over this group if it has no CPUs allowed */
  1053. if (!cpus_intersects(group->cpumask, p->cpus_allowed))
  1054. goto nextgroup;
  1055. local_group = cpu_isset(this_cpu, group->cpumask);
  1056. /* Tally up the load of all CPUs in the group */
  1057. avg_load = 0;
  1058. for_each_cpu_mask(i, group->cpumask) {
  1059. /* Bias balancing toward cpus of our domain */
  1060. if (local_group)
  1061. load = source_load(i, load_idx);
  1062. else
  1063. load = target_load(i, load_idx);
  1064. avg_load += load;
  1065. }
  1066. /* Adjust by relative CPU power of the group */
  1067. avg_load = sg_div_cpu_power(group,
  1068. avg_load * SCHED_LOAD_SCALE);
  1069. if (local_group) {
  1070. this_load = avg_load;
  1071. this = group;
  1072. } else if (avg_load < min_load) {
  1073. min_load = avg_load;
  1074. idlest = group;
  1075. }
  1076. nextgroup:
  1077. group = group->next;
  1078. } while (group != sd->groups);
  1079. if (!idlest || 100*this_load < imbalance*min_load)
  1080. return NULL;
  1081. return idlest;
  1082. }
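/*
 * Worked example of the final check (assuming sd->imbalance_pct == 125,
 * so imbalance == 112): with this_load == 900 and min_load == 1000,
 * 100*900 < 112*1000 and NULL is returned, i.e. the remote group must be
 * more than ~12% less loaded than the local one before it is preferred.
 */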
  1083. /*
  1084. * find_idlest_cpu - find the idlest cpu among the cpus in group.
  1085. */
  1086. static int
  1087. find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
  1088. {
  1089. cpumask_t tmp;
  1090. unsigned long load, min_load = ULONG_MAX;
  1091. int idlest = -1;
  1092. int i;
  1093. /* Traverse only the allowed CPUs */
  1094. cpus_and(tmp, group->cpumask, p->cpus_allowed);
  1095. for_each_cpu_mask(i, tmp) {
  1096. load = weighted_cpuload(i);
  1097. if (load < min_load || (load == min_load && i == this_cpu)) {
  1098. min_load = load;
  1099. idlest = i;
  1100. }
  1101. }
  1102. return idlest;
  1103. }
  1104. /*
  1105. * sched_balance_self: balance the current task (running on cpu) in domains
  1106. * that have the 'flag' flag set. In practice, this is SD_BALANCE_FORK and
  1107. * SD_BALANCE_EXEC.
  1108. *
  1109. * Balance, ie. select the least loaded group.
  1110. *
  1111. * Returns the target CPU number, or the same CPU if no balancing is needed.
  1112. *
  1113. * preempt must be disabled.
  1114. */
  1115. static int sched_balance_self(int cpu, int flag)
  1116. {
  1117. struct task_struct *t = current;
  1118. struct sched_domain *tmp, *sd = NULL;
  1119. for_each_domain(cpu, tmp) {
  1120. /*
  1121. * If power savings logic is enabled for a domain, stop there.
  1122. */
  1123. if (tmp->flags & SD_POWERSAVINGS_BALANCE)
  1124. break;
  1125. if (tmp->flags & flag)
  1126. sd = tmp;
  1127. }
  1128. while (sd) {
  1129. cpumask_t span;
  1130. struct sched_group *group;
  1131. int new_cpu, weight;
  1132. if (!(sd->flags & flag)) {
  1133. sd = sd->child;
  1134. continue;
  1135. }
  1136. span = sd->span;
  1137. group = find_idlest_group(sd, t, cpu);
  1138. if (!group) {
  1139. sd = sd->child;
  1140. continue;
  1141. }
  1142. new_cpu = find_idlest_cpu(group, t, cpu);
  1143. if (new_cpu == -1 || new_cpu == cpu) {
  1144. /* Now try balancing at a lower domain level of cpu */
  1145. sd = sd->child;
  1146. continue;
  1147. }
  1148. /* Now try balancing at a lower domain level of new_cpu */
  1149. cpu = new_cpu;
  1150. sd = NULL;
  1151. weight = cpus_weight(span);
  1152. for_each_domain(cpu, tmp) {
  1153. if (weight <= cpus_weight(tmp->span))
  1154. break;
  1155. if (tmp->flags & flag)
  1156. sd = tmp;
  1157. }
  1158. /* while loop will break here if sd == NULL */
  1159. }
  1160. return cpu;
  1161. }
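/*
 * Example walk (illustrative): on a two-node box, an execve() on CPU 0
 * starts at the highest domain with SD_BALANCE_EXEC set. If node 1 is
 * the idlest group and CPU 4 its idlest allowed CPU, the search then
 * repeats in CPU 4's lower-level domains (which span fewer CPUs) until
 * no further improvement is found, and that CPU is returned.
 */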
  1162. #endif /* CONFIG_SMP */
  1163. /*
  1164. * wake_idle() will wake a task on an idle cpu if task->cpu is
  1165. * not idle and an idle cpu is available. The span of cpus to
  1166. * search starts with cpus closest then further out as needed,
  1167. * so we always favor a closer, idle cpu.
  1168. *
  1169. * Returns the CPU we should wake onto.
  1170. */
  1171. #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
  1172. static int wake_idle(int cpu, struct task_struct *p)
  1173. {
  1174. cpumask_t tmp;
  1175. struct sched_domain *sd;
  1176. int i;
  1177. /*
  1178. * If it is idle, then it is the best cpu to run this task.
  1179. *
1180. * This cpu is also the best if it already has more than one task.
1181. * Its siblings must also be busy (in most cases), as they didn't
1182. * pick up the extra load from this cpu, so we need not check the
1183. * sibling runqueue info. This avoids the checks and the cache-miss
1184. * penalties associated with doing so.
  1185. */
  1186. if (idle_cpu(cpu) || cpu_rq(cpu)->nr_running > 1)
  1187. return cpu;
  1188. for_each_domain(cpu, sd) {
  1189. if (sd->flags & SD_WAKE_IDLE) {
  1190. cpus_and(tmp, sd->span, p->cpus_allowed);
  1191. for_each_cpu_mask(i, tmp) {
  1192. if (idle_cpu(i))
  1193. return i;
  1194. }
  1195. } else {
  1196. break;
  1197. }
  1198. }
  1199. return cpu;
  1200. }
  1201. #else
  1202. static inline int wake_idle(int cpu, struct task_struct *p)
  1203. {
  1204. return cpu;
  1205. }
  1206. #endif
  1207. /***
  1208. * try_to_wake_up - wake up a thread
  1209. * @p: the to-be-woken-up thread
  1210. * @state: the mask of task states that can be woken
  1211. * @sync: do a synchronous wakeup?
  1212. *
  1213. * Put it on the run-queue if it's not already there. The "current"
  1214. * thread is always on the run-queue (except when the actual
  1215. * re-schedule is in progress), and as such you're allowed to do
  1216. * the simpler "current->state = TASK_RUNNING" to mark yourself
  1217. * runnable without the overhead of this.
  1218. *
  1219. * returns failure only if the task is already active.
  1220. */
  1221. static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
  1222. {
  1223. int cpu, this_cpu, success = 0;
  1224. unsigned long flags;
  1225. long old_state;
  1226. struct rq *rq;
  1227. #ifdef CONFIG_SMP
  1228. struct sched_domain *sd, *this_sd = NULL;
  1229. unsigned long load, this_load;
  1230. int new_cpu;
  1231. #endif
  1232. rq = task_rq_lock(p, &flags);
  1233. old_state = p->state;
  1234. if (!(old_state & state))
  1235. goto out;
  1236. if (p->se.on_rq)
  1237. goto out_running;
  1238. cpu = task_cpu(p);
  1239. this_cpu = smp_processor_id();
  1240. #ifdef CONFIG_SMP
  1241. if (unlikely(task_running(rq, p)))
  1242. goto out_activate;
  1243. new_cpu = cpu;
  1244. schedstat_inc(rq, ttwu_cnt);
  1245. if (cpu == this_cpu) {
  1246. schedstat_inc(rq, ttwu_local);
  1247. goto out_set_cpu;
  1248. }
  1249. for_each_domain(this_cpu, sd) {
  1250. if (cpu_isset(cpu, sd->span)) {
  1251. schedstat_inc(sd, ttwu_wake_remote);
  1252. this_sd = sd;
  1253. break;
  1254. }
  1255. }
  1256. if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
  1257. goto out_set_cpu;
  1258. /*
  1259. * Check for affine wakeup and passive balancing possibilities.
  1260. */
  1261. if (this_sd) {
  1262. int idx = this_sd->wake_idx;
  1263. unsigned int imbalance;
  1264. imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
  1265. load = source_load(cpu, idx);
  1266. this_load = target_load(this_cpu, idx);
  1267. new_cpu = this_cpu; /* Wake to this CPU if we can */
  1268. if (this_sd->flags & SD_WAKE_AFFINE) {
  1269. unsigned long tl = this_load;
  1270. unsigned long tl_per_task;
  1271. tl_per_task = cpu_avg_load_per_task(this_cpu);
  1272. /*
  1273. * If sync wakeup then subtract the (maximum possible)
  1274. * effect of the currently running task from the load
  1275. * of the current CPU:
  1276. */
  1277. if (sync)
  1278. tl -= current->se.load.weight;
  1279. if ((tl <= load &&
  1280. tl + target_load(cpu, idx) <= tl_per_task) ||
  1281. 100*(tl + p->se.load.weight) <= imbalance*load) {
  1282. /*
  1283. * This domain has SD_WAKE_AFFINE and
  1284. * p is cache cold in this domain, and
  1285. * there is no bad imbalance.
  1286. */
  1287. schedstat_inc(this_sd, ttwu_move_affine);
  1288. goto out_set_cpu;
  1289. }
  1290. }
  1291. /*
  1292. * Start passive balancing when half the imbalance_pct
  1293. * limit is reached.
  1294. */
  1295. if (this_sd->flags & SD_WAKE_BALANCE) {
  1296. if (imbalance*this_load <= 100*load) {
  1297. schedstat_inc(this_sd, ttwu_move_balance);
  1298. goto out_set_cpu;
  1299. }
  1300. }
  1301. }
  1302. new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
  1303. out_set_cpu:
  1304. new_cpu = wake_idle(new_cpu, p);
  1305. if (new_cpu != cpu) {
  1306. set_task_cpu(p, new_cpu);
  1307. task_rq_unlock(rq, &flags);
  1308. /* might preempt at this point */
  1309. rq = task_rq_lock(p, &flags);
  1310. old_state = p->state;
  1311. if (!(old_state & state))
  1312. goto out;
  1313. if (p->se.on_rq)
  1314. goto out_running;
  1315. this_cpu = smp_processor_id();
  1316. cpu = task_cpu(p);
  1317. }
  1318. out_activate:
  1319. #endif /* CONFIG_SMP */
  1320. activate_task(rq, p, 1);
  1321. /*
  1322. * Sync wakeups (i.e. those types of wakeups where the waker
  1323. * has indicated that it will leave the CPU in short order)
1324. * don't trigger a preemption if the woken-up task will run on
1325. * this cpu. (In this case the 'I will reschedule' promise of
  1326. * the waker guarantees that the freshly woken up task is going
  1327. * to be considered on this CPU.)
  1328. */
  1329. if (!sync || cpu != this_cpu)
  1330. check_preempt_curr(rq, p);
  1331. success = 1;
  1332. out_running:
  1333. p->state = TASK_RUNNING;
  1334. out:
  1335. task_rq_unlock(rq, &flags);
  1336. return success;
  1337. }
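/*
 * Illustrative numbers for the affine-wakeup test above (assuming
 * imbalance_pct == 125, so imbalance == 112, and nice-0 weights of
 * 1024): with tl == 1024 on the waking CPU and load == 2048 on the
 * task's previous CPU, 100*(1024 + 1024) == 204800 <= 112*2048 ==
 * 229376, so the wakeup is pulled to this_cpu rather than the (busier)
 * previous CPU.
 */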
  1338. int fastcall wake_up_process(struct task_struct *p)
  1339. {
  1340. return try_to_wake_up(p, TASK_STOPPED | TASK_TRACED |
  1341. TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE, 0);
  1342. }
  1343. EXPORT_SYMBOL(wake_up_process);
  1344. int fastcall wake_up_state(struct task_struct *p, unsigned int state)
  1345. {
  1346. return try_to_wake_up(p, state, 0);
  1347. }
  1348. /*
  1349. * Perform scheduler related setup for a newly forked process p.
  1350. * p is forked by current.
  1351. *
  1352. * __sched_fork() is basic setup used by init_idle() too:
  1353. */
  1354. static void __sched_fork(struct task_struct *p)
  1355. {
  1356. p->se.wait_start_fair = 0;
  1357. p->se.wait_start = 0;
  1358. p->se.exec_start = 0;
  1359. p->se.sum_exec_runtime = 0;
  1360. p->se.delta_exec = 0;
  1361. p->se.delta_fair_run = 0;
  1362. p->se.delta_fair_sleep = 0;
  1363. p->se.wait_runtime = 0;
  1364. p->se.sum_wait_runtime = 0;
  1365. p->se.sum_sleep_runtime = 0;
  1366. p->se.sleep_start = 0;
  1367. p->se.sleep_start_fair = 0;
  1368. p->se.block_start = 0;
  1369. p->se.sleep_max = 0;
  1370. p->se.block_max = 0;
  1371. p->se.exec_max = 0;
  1372. p->se.wait_max = 0;
  1373. p->se.wait_runtime_overruns = 0;
  1374. p->se.wait_runtime_underruns = 0;
  1375. INIT_LIST_HEAD(&p->run_list);
  1376. p->se.on_rq = 0;
  1377. #ifdef CONFIG_PREEMPT_NOTIFIERS
  1378. INIT_HLIST_HEAD(&p->preempt_notifiers);
  1379. #endif
  1380. /*
  1381. * We mark the process as running here, but have not actually
  1382. * inserted it onto the runqueue yet. This guarantees that
  1383. * nobody will actually run it, and a signal or other external
  1384. * event cannot wake it up and insert it on the runqueue either.
  1385. */
  1386. p->state = TASK_RUNNING;
  1387. }
  1388. /*
  1389. * fork()/clone()-time setup:
  1390. */
  1391. void sched_fork(struct task_struct *p, int clone_flags)
  1392. {
  1393. int cpu = get_cpu();
  1394. __sched_fork(p);
  1395. #ifdef CONFIG_SMP
  1396. cpu = sched_balance_self(cpu, SD_BALANCE_FORK);
  1397. #endif
  1398. __set_task_cpu(p, cpu);
  1399. /*
  1400. * Make sure we do not leak PI boosting priority to the child:
  1401. */
  1402. p->prio = current->normal_prio;
  1403. #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
  1404. if (likely(sched_info_on()))
  1405. memset(&p->sched_info, 0, sizeof(p->sched_info));
  1406. #endif
  1407. #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
  1408. p->oncpu = 0;
  1409. #endif
  1410. #ifdef CONFIG_PREEMPT
  1411. /* Want to start with kernel preemption disabled. */
  1412. task_thread_info(p)->preempt_count = 1;
  1413. #endif
  1414. put_cpu();
  1415. }
  1416. /*
1417. * After fork, the child runs first (default). If set to 0, then the
1418. * parent will (try to) run first.
  1419. */
  1420. unsigned int __read_mostly sysctl_sched_child_runs_first = 1;
  1421. /*
  1422. * wake_up_new_task - wake up a newly created task for the first time.
  1423. *
  1424. * This function will do some initial scheduler statistics housekeeping
  1425. * that must be done for every newly created context, then puts the task
  1426. * on the runqueue and wakes it.
  1427. */
  1428. void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
  1429. {
  1430. unsigned long flags;
  1431. struct rq *rq;
  1432. int this_cpu;
  1433. rq = task_rq_lock(p, &flags);
  1434. BUG_ON(p->state != TASK_RUNNING);
  1435. this_cpu = smp_processor_id(); /* parent's CPU */
  1436. p->prio = effective_prio(p);
  1437. if (!sysctl_sched_child_runs_first || (clone_flags & CLONE_VM) ||
  1438. task_cpu(p) != this_cpu || !current->se.on_rq) {
  1439. activate_task(rq, p, 0);
  1440. } else {
  1441. /*
  1442. * Let the scheduling class do new task startup
  1443. * management (if any):
  1444. */
  1445. p->sched_class->task_new(rq, p);
  1446. }
  1447. check_preempt_curr(rq, p);
  1448. task_rq_unlock(rq, &flags);
  1449. }
  1450. #ifdef CONFIG_PREEMPT_NOTIFIERS
  1451. /**
1452. * preempt_notifier_register - tell me when current is being preempted
  1453. * and rescheduled
  1454. */
  1455. void preempt_notifier_register(struct preempt_notifier *notifier)
  1456. {
  1457. hlist_add_head(&notifier->link, &current->preempt_notifiers);
  1458. }
  1459. EXPORT_SYMBOL_GPL(preempt_notifier_register);
  1460. /**
  1461. * preempt_notifier_unregister - no longer interested in preemption notifications
  1462. *
  1463. * This is safe to call from within a preemption notifier.
  1464. */
  1465. void preempt_notifier_unregister(struct preempt_notifier *notifier)
  1466. {
  1467. hlist_del(&notifier->link);
  1468. }
  1469. EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
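/*
 * Usage sketch (illustrative; assumes the preempt_notifier_init() helper
 * and struct preempt_ops from <linux/preempt.h>): a subsystem embeds a
 * struct preempt_notifier in its per-context data and registers it while
 * its task runs on a CPU:
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in  = my_sched_in,
 *		.sched_out = my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&ctx->notifier, &my_preempt_ops);
 *	preempt_notifier_register(&ctx->notifier);
 *	...
 *	preempt_notifier_unregister(&ctx->notifier);
 */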
  1470. static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
  1471. {
  1472. struct preempt_notifier *notifier;
  1473. struct hlist_node *node;
  1474. hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
  1475. notifier->ops->sched_in(notifier, raw_smp_processor_id());
  1476. }
  1477. static void
  1478. fire_sched_out_preempt_notifiers(struct task_struct *curr,
  1479. struct task_struct *next)
  1480. {
  1481. struct preempt_notifier *notifier;
  1482. struct hlist_node *node;
  1483. hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
  1484. notifier->ops->sched_out(notifier, next);
  1485. }
  1486. #else
  1487. static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
  1488. {
  1489. }
  1490. static void
  1491. fire_sched_out_preempt_notifiers(struct task_struct *curr,
  1492. struct task_struct *next)
  1493. {
  1494. }
  1495. #endif
  1496. /**
  1497. * prepare_task_switch - prepare to switch tasks
  1498. * @rq: the runqueue preparing to switch
  1499. * @next: the task we are going to switch to.
  1500. *
  1501. * This is called with the rq lock held and interrupts off. It must
  1502. * be paired with a subsequent finish_task_switch after the context
  1503. * switch.
  1504. *
  1505. * prepare_task_switch sets up locking and calls architecture specific
  1506. * hooks.
  1507. */
  1508. static inline void
  1509. prepare_task_switch(struct rq *rq, struct task_struct *prev,
  1510. struct task_struct *next)
  1511. {
  1512. fire_sched_out_preempt_notifiers(prev, next);
  1513. prepare_lock_switch(rq, next);
  1514. prepare_arch_switch(next);
  1515. }
  1516. /**
  1517. * finish_task_switch - clean up after a task-switch
  1518. * @rq: runqueue associated with task-switch
  1519. * @prev: the thread we just switched away from.
  1520. *
  1521. * finish_task_switch must be called after the context switch, paired
  1522. * with a prepare_task_switch call before the context switch.
  1523. * finish_task_switch will reconcile locking set up by prepare_task_switch,
  1524. * and do any other architecture-specific cleanup actions.
  1525. *
  1526. * Note that we may have delayed dropping an mm in context_switch(). If
  1527. * so, we finish that here outside of the runqueue lock. (Doing it
  1528. * with the lock held can cause deadlocks; see schedule() for
  1529. * details.)
  1530. */
  1531. static inline void finish_task_switch(struct rq *rq, struct task_struct *prev)
  1532. __releases(rq->lock)
  1533. {
  1534. struct mm_struct *mm = rq->prev_mm;
  1535. long prev_state;
  1536. rq->prev_mm = NULL;
  1537. /*
  1538. * A task struct has one reference for the use as "current".
  1539. * If a task dies, then it sets TASK_DEAD in tsk->state and calls
  1540. * schedule one last time. The schedule call will never return, and
  1541. * the scheduled task must drop that reference.
  1542. * The test for TASK_DEAD must occur while the runqueue locks are
  1543. * still held, otherwise prev could be scheduled on another cpu, die
  1544. * there before we look at prev->state, and then the reference would
  1545. * be dropped twice.
  1546. * Manfred Spraul <manfred@colorfullife.com>
  1547. */
  1548. prev_state = prev->state;
  1549. finish_arch_switch(prev);
  1550. finish_lock_switch(rq, prev);
  1551. fire_sched_in_preempt_notifiers(current);
  1552. if (mm)
  1553. mmdrop(mm);
  1554. if (unlikely(prev_state == TASK_DEAD)) {
  1555. /*
  1556. * Remove function-return probe instances associated with this
  1557. * task and put them back on the free list.
  1558. */
  1559. kprobe_flush_task(prev);
  1560. put_task_struct(prev);
  1561. }
  1562. }
  1563. /**
  1564. * schedule_tail - first thing a freshly forked thread must call.
  1565. * @prev: the thread we just switched away from.
  1566. */
  1567. asmlinkage void schedule_tail(struct task_struct *prev)
  1568. __releases(rq->lock)
  1569. {
  1570. struct rq *rq = this_rq();
  1571. finish_task_switch(rq, prev);
  1572. #ifdef __ARCH_WANT_UNLOCKED_CTXSW
  1573. /* In this case, finish_task_switch does not reenable preemption */
  1574. preempt_enable();
  1575. #endif
  1576. if (current->set_child_tid)
  1577. put_user(current->pid, current->set_child_tid);
  1578. }
  1579. /*
  1580. * context_switch - switch to the new MM and the new
  1581. * thread's register state.
  1582. */
  1583. static inline void
  1584. context_switch(struct rq *rq, struct task_struct *prev,
  1585. struct task_struct *next)
  1586. {
  1587. struct mm_struct *mm, *oldmm;
  1588. prepare_task_switch(rq, prev, next);
  1589. mm = next->mm;
  1590. oldmm = prev->active_mm;
  1591. /*
  1592. * For paravirt, this is coupled with an exit in switch_to to
  1593. * combine the page table reload and the switch backend into
  1594. * one hypercall.
  1595. */
  1596. arch_enter_lazy_cpu_mode();
  1597. if (unlikely(!mm)) {
  1598. next->active_mm = oldmm;
  1599. atomic_inc(&oldmm->mm_count);
  1600. enter_lazy_tlb(oldmm, next);
  1601. } else
  1602. switch_mm(oldmm, mm, next);
  1603. if (unlikely(!prev->mm)) {
  1604. prev->active_mm = NULL;
  1605. rq->prev_mm = oldmm;
  1606. }
  1607. /*
1608. * The runqueue lock will be released by the next
1609. * task (which is an invalid locking op, but in the case
  1610. * of the scheduler it's an obvious special-case), so we
  1611. * do an early lockdep release here:
  1612. */
  1613. #ifndef __ARCH_WANT_UNLOCKED_CTXSW
  1614. spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  1615. #endif
  1616. /* Here we just switch the register state and the stack. */
  1617. switch_to(prev, next, prev);
  1618. barrier();
  1619. /*
  1620. * this_rq must be evaluated again because prev may have moved
  1621. * CPUs since it called schedule(), thus the 'rq' on its stack
  1622. * frame will be invalid.
  1623. */
  1624. finish_task_switch(this_rq(), prev);
  1625. }
  1626. /*
  1627. * nr_running, nr_uninterruptible and nr_context_switches:
  1628. *
  1629. * externally visible scheduler statistics: current number of runnable
  1630. * threads, current number of uninterruptible-sleeping threads, total
  1631. * number of context switches performed since bootup.
  1632. */
  1633. unsigned long nr_running(void)
  1634. {
  1635. unsigned long i, sum = 0;
  1636. for_each_online_cpu(i)
  1637. sum += cpu_rq(i)->nr_running;
  1638. return sum;
  1639. }
  1640. unsigned long nr_uninterruptible(void)
  1641. {
  1642. unsigned long i, sum = 0;
  1643. for_each_possible_cpu(i)
  1644. sum += cpu_rq(i)->nr_uninterruptible;
  1645. /*
  1646. * Since we read the counters lockless, it might be slightly
  1647. * inaccurate. Do not allow it to go below zero though:
  1648. */
  1649. if (unlikely((long)sum < 0))
  1650. sum = 0;
  1651. return sum;
  1652. }
  1653. unsigned long long nr_context_switches(void)
  1654. {
  1655. int i;
  1656. unsigned long long sum = 0;
  1657. for_each_possible_cpu(i)
  1658. sum += cpu_rq(i)->nr_switches;
  1659. return sum;
  1660. }
  1661. unsigned long nr_iowait(void)
  1662. {
  1663. unsigned long i, sum = 0;
  1664. for_each_possible_cpu(i)
  1665. sum += atomic_read(&cpu_rq(i)->nr_iowait);
  1666. return sum;
  1667. }
  1668. unsigned long nr_active(void)
  1669. {
  1670. unsigned long i, running = 0, uninterruptible = 0;
  1671. for_each_online_cpu(i) {
  1672. running += cpu_rq(i)->nr_running;
  1673. uninterruptible += cpu_rq(i)->nr_uninterruptible;
  1674. }
  1675. if (unlikely((long)uninterruptible < 0))
  1676. uninterruptible = 0;
  1677. return running + uninterruptible;
  1678. }
  1679. /*
  1680. * Update rq->cpu_load[] statistics. This function is usually called every
  1681. * scheduler tick (TICK_NSEC).
  1682. */
  1683. static void update_cpu_load(struct rq *this_rq)
  1684. {
  1685. u64 fair_delta64, exec_delta64, idle_delta64, sample_interval64, tmp64;
  1686. unsigned long total_load = this_rq->ls.load.weight;
  1687. unsigned long this_load = total_load;
  1688. struct load_stat *ls = &this_rq->ls;
  1689. u64 now = __rq_clock(this_rq);
  1690. int i, scale;
  1691. this_rq->nr_load_updates++;
  1692. if (unlikely(!(sysctl_sched_features & SCHED_FEAT_PRECISE_CPU_LOAD)))
  1693. goto do_avg;
  1694. /* Update delta_fair/delta_exec fields first */
  1695. update_curr_load(this_rq, now);
  1696. fair_delta64 = ls->delta_fair + 1;
  1697. ls->delta_fair = 0;
  1698. exec_delta64 = ls->delta_exec + 1;
  1699. ls->delta_exec = 0;
  1700. sample_interval64 = now - ls->load_update_last;
  1701. ls->load_update_last = now;
  1702. if ((s64)sample_interval64 < (s64)TICK_NSEC)
  1703. sample_interval64 = TICK_NSEC;
  1704. if (exec_delta64 > sample_interval64)
  1705. exec_delta64 = sample_interval64;
  1706. idle_delta64 = sample_interval64 - exec_delta64;
  1707. tmp64 = div64_64(SCHED_LOAD_SCALE * exec_delta64, fair_delta64);
  1708. tmp64 = div64_64(tmp64 * exec_delta64, sample_interval64);
  1709. this_load = (unsigned long)tmp64;
  1710. do_avg:
  1711. /* Update our load: */
  1712. for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
  1713. unsigned long old_load, new_load;
  1714. /* scale is effectively 1 << i now, and >> i divides by scale */
  1715. old_load = this_rq->cpu_load[i];
  1716. new_load = this_load;
  1717. this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
  1718. }
  1719. }
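/*
 * Decay example for the loop above: for i == 2, scale == 4, so
 * cpu_load[2] = (old*3 + new) >> 2. With old == 2048 and new == 1024
 * this gives (6144 + 1024) / 4 == 1792, i.e. the higher indices react
 * more slowly, while cpu_load[0] simply tracks the current load.
 */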
  1720. #ifdef CONFIG_SMP
  1721. /*
  1722. * double_rq_lock - safely lock two runqueues
  1723. *
  1724. * Note this does not disable interrupts like task_rq_lock,
  1725. * you need to do so manually before calling.
  1726. */
  1727. static void double_rq_lock(struct rq *rq1, struct rq *rq2)
  1728. __acquires(rq1->lock)
  1729. __acquires(rq2->lock)
  1730. {
  1731. BUG_ON(!irqs_disabled());
  1732. if (rq1 == rq2) {
  1733. spin_lock(&rq1->lock);
  1734. __acquire(rq2->lock); /* Fake it out ;) */
  1735. } else {
  1736. if (rq1 < rq2) {
  1737. spin_lock(&rq1->lock);
  1738. spin_lock(&rq2->lock);
  1739. } else {
  1740. spin_lock(&rq2->lock);
  1741. spin_lock(&rq1->lock);
  1742. }
  1743. }
  1744. }
  1745. /*
  1746. * double_rq_unlock - safely unlock two runqueues
  1747. *
  1748. * Note this does not restore interrupts like task_rq_unlock,
  1749. * you need to do so manually after calling.
  1750. */
  1751. static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
  1752. __releases(rq1->lock)
  1753. __releases(rq2->lock)
  1754. {
  1755. spin_unlock(&rq1->lock);
  1756. if (rq1 != rq2)
  1757. spin_unlock(&rq2->lock);
  1758. else
  1759. __release(rq2->lock);
  1760. }
  1761. /*
  1762. * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
  1763. */
  1764. static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
  1765. __releases(this_rq->lock)
  1766. __acquires(busiest->lock)
  1767. __acquires(this_rq->lock)
  1768. {
  1769. if (unlikely(!irqs_disabled())) {
1770. /* printk() doesn't work well under rq->lock */
  1771. spin_unlock(&this_rq->lock);
  1772. BUG_ON(1);
  1773. }
  1774. if (unlikely(!spin_trylock(&busiest->lock))) {
  1775. if (busiest < this_rq) {
  1776. spin_unlock(&this_rq->lock);
  1777. spin_lock(&busiest->lock);
  1778. spin_lock(&this_rq->lock);
  1779. } else
  1780. spin_lock(&busiest->lock);
  1781. }
  1782. }
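/*
 * Lock-ordering example (sketch): runqueue locks are always taken in
 * ascending runqueue-address order. If this_rq is CPU 2's runqueue and
 * busiest is CPU 0's (a lower address), then when the trylock fails we
 * drop this_rq->lock, take busiest->lock first and retake this_rq->lock,
 * avoiding an ABBA deadlock against a balancer going the other way.
 */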
  1783. /*
  1784. * If dest_cpu is allowed for this process, migrate the task to it.
  1785. * This is accomplished by forcing the cpu_allowed mask to only
  1786. * allow dest_cpu, which will force the cpu onto dest_cpu. Then
  1787. * the cpu_allowed mask is restored.
  1788. */
  1789. static void sched_migrate_task(struct task_struct *p, int dest_cpu)
  1790. {
  1791. struct migration_req req;
  1792. unsigned long flags;
  1793. struct rq *rq;
  1794. rq = task_rq_lock(p, &flags);
  1795. if (!cpu_isset(dest_cpu, p->cpus_allowed)
  1796. || unlikely(cpu_is_offline(dest_cpu)))
  1797. goto out;
  1798. /* force the process onto the specified CPU */
  1799. if (migrate_task(p, dest_cpu, &req)) {
  1800. /* Need to wait for migration thread (might exit: take ref). */
  1801. struct task_struct *mt = rq->migration_thread;
  1802. get_task_struct(mt);
  1803. task_rq_unlock(rq, &flags);
  1804. wake_up_process(mt);
  1805. put_task_struct(mt);
  1806. wait_for_completion(&req.done);
  1807. return;
  1808. }
  1809. out:
  1810. task_rq_unlock(rq, &flags);
  1811. }
  1812. /*
  1813. * sched_exec - execve() is a valuable balancing opportunity, because at
  1814. * this point the task has the smallest effective memory and cache footprint.
  1815. */
  1816. void sched_exec(void)
  1817. {
  1818. int new_cpu, this_cpu = get_cpu();
  1819. new_cpu = sched_balance_self(this_cpu, SD_BALANCE_EXEC);
  1820. put_cpu();
  1821. if (new_cpu != this_cpu)
  1822. sched_migrate_task(current, new_cpu);
  1823. }
  1824. /*
  1825. * pull_task - move a task from a remote runqueue to the local runqueue.
  1826. * Both runqueues must be locked.
  1827. */
  1828. static void pull_task(struct rq *src_rq, struct task_struct *p,
  1829. struct rq *this_rq, int this_cpu)
  1830. {
  1831. deactivate_task(src_rq, p, 0);
  1832. set_task_cpu(p, this_cpu);
  1833. activate_task(this_rq, p, 0);
  1834. /*
1835. * Note that idle threads have a prio of MAX_PRIO, so this test
1836. * is always true for them.
  1837. */
  1838. check_preempt_curr(this_rq, p);
  1839. }
  1840. /*
  1841. * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  1842. */
  1843. static
  1844. int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
  1845. struct sched_domain *sd, enum cpu_idle_type idle,
  1846. int *all_pinned)
  1847. {
  1848. /*
  1849. * We do not migrate tasks that are:
1850. * 1) running (obviously), or
1851. * 2) not allowed onto this CPU (cpus_allowed), or
1852. * 3) cache-hot on their current CPU.
  1853. */
  1854. if (!cpu_isset(this_cpu, p->cpus_allowed))
  1855. return 0;
  1856. *all_pinned = 0;
  1857. if (task_running(rq, p))
  1858. return 0;
  1859. /*
  1860. * Aggressive migration if too many balance attempts have failed:
  1861. */
  1862. if (sd->nr_balance_failed > sd->cache_nice_tries)
  1863. return 1;
  1864. return 1;
  1865. }
  1866. static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
  1867. unsigned long max_nr_move, unsigned long max_load_move,
  1868. struct sched_domain *sd, enum cpu_idle_type idle,
  1869. int *all_pinned, unsigned long *load_moved,
  1870. int this_best_prio, int best_prio, int best_prio_seen,
  1871. struct rq_iterator *iterator)
  1872. {
  1873. int pulled = 0, pinned = 0, skip_for_load;
  1874. struct task_struct *p;
  1875. long rem_load_move = max_load_move;
  1876. if (max_nr_move == 0 || max_load_move == 0)
  1877. goto out;
  1878. pinned = 1;
  1879. /*
  1880. * Start the load-balancing iterator:
  1881. */
  1882. p = iterator->start(iterator->arg);
  1883. next:
  1884. if (!p)
  1885. goto out;
  1886. /*
1887. * To help distribute high priority tasks across CPUs, we don't
1888. * skip a task if it will be the highest priority task (i.e. smallest
1889. * prio value) on its new queue, regardless of its load weight.
  1890. */
  1891. skip_for_load = (p->se.load.weight >> 1) > rem_load_move +
  1892. SCHED_LOAD_SCALE_FUZZ;
  1893. if (skip_for_load && p->prio < this_best_prio)
  1894. skip_for_load = !best_prio_seen && p->prio == best_prio;
  1895. if (skip_for_load ||
  1896. !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) {
  1897. best_prio_seen |= p->prio == best_prio;
  1898. p = iterator->next(iterator->arg);
  1899. goto next;
  1900. }
  1901. pull_task(busiest, p, this_rq, this_cpu);
  1902. pulled++;
  1903. rem_load_move -= p->se.load.weight;
  1904. /*
  1905. * We only want to steal up to the prescribed number of tasks
  1906. * and the prescribed amount of weighted load.
  1907. */
  1908. if (pulled < max_nr_move && rem_load_move > 0) {
  1909. if (p->prio < this_best_prio)
  1910. this_best_prio = p->prio;
  1911. p = iterator->next(iterator->arg);
  1912. goto next;
  1913. }
  1914. out:
  1915. /*
  1916. * Right now, this is the only place pull_task() is called,
  1917. * so we can safely collect pull_task() stats here rather than
  1918. * inside pull_task().
  1919. */
  1920. schedstat_add(sd, lb_gained[idle], pulled);
  1921. if (all_pinned)
  1922. *all_pinned = pinned;
  1923. *load_moved = max_load_move - rem_load_move;
  1924. return pulled;
  1925. }
  1926. /*
  1927. * move_tasks tries to move up to max_nr_move tasks and max_load_move weighted
  1928. * load from busiest to this_rq, as part of a balancing operation within
  1929. * "domain". Returns the number of tasks moved.
  1930. *
  1931. * Called with both runqueues locked.
  1932. */
  1933. static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
  1934. unsigned long max_nr_move, unsigned long max_load_move,
  1935. struct sched_domain *sd, enum cpu_idle_type idle,
  1936. int *all_pinned)
  1937. {
  1938. struct sched_class *class = sched_class_highest;
  1939. unsigned long load_moved, total_nr_moved = 0, nr_moved;
  1940. long rem_load_move = max_load_move;
  1941. do {
  1942. nr_moved = class->load_balance(this_rq, this_cpu, busiest,
  1943. max_nr_move, (unsigned long)rem_load_move,
  1944. sd, idle, all_pinned, &load_moved);
  1945. total_nr_moved += nr_moved;
  1946. max_nr_move -= nr_moved;
  1947. rem_load_move -= load_moved;
  1948. class = class->next;
  1949. } while (class && max_nr_move && rem_load_move > 0);
  1950. return total_nr_moved;
  1951. }
  1952. /*
  1953. * find_busiest_group finds and returns the busiest CPU group within the
  1954. * domain. It calculates and returns the amount of weighted load which
  1955. * should be moved to restore balance via the imbalance parameter.
  1956. */
  1957. static struct sched_group *
  1958. find_busiest_group(struct sched_domain *sd, int this_cpu,
  1959. unsigned long *imbalance, enum cpu_idle_type idle,
  1960. int *sd_idle, cpumask_t *cpus, int *balance)
  1961. {
  1962. struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
  1963. unsigned long max_load, avg_load, total_load, this_load, total_pwr;
  1964. unsigned long max_pull;
  1965. unsigned long busiest_load_per_task, busiest_nr_running;
  1966. unsigned long this_load_per_task, this_nr_running;
  1967. int load_idx;
  1968. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
  1969. int power_savings_balance = 1;
  1970. unsigned long leader_nr_running = 0, min_load_per_task = 0;
  1971. unsigned long min_nr_running = ULONG_MAX;
  1972. struct sched_group *group_min = NULL, *group_leader = NULL;
  1973. #endif
  1974. max_load = this_load = total_load = total_pwr = 0;
  1975. busiest_load_per_task = busiest_nr_running = 0;
  1976. this_load_per_task = this_nr_running = 0;
  1977. if (idle == CPU_NOT_IDLE)
  1978. load_idx = sd->busy_idx;
  1979. else if (idle == CPU_NEWLY_IDLE)
  1980. load_idx = sd->newidle_idx;
  1981. else
  1982. load_idx = sd->idle_idx;
  1983. do {
  1984. unsigned long load, group_capacity;
  1985. int local_group;
  1986. int i;
  1987. unsigned int balance_cpu = -1, first_idle_cpu = 0;
  1988. unsigned long sum_nr_running, sum_weighted_load;
  1989. local_group = cpu_isset(this_cpu, group->cpumask);
  1990. if (local_group)
  1991. balance_cpu = first_cpu(group->cpumask);
  1992. /* Tally up the load of all CPUs in the group */
  1993. sum_weighted_load = sum_nr_running = avg_load = 0;
  1994. for_each_cpu_mask(i, group->cpumask) {
  1995. struct rq *rq;
  1996. if (!cpu_isset(i, *cpus))
  1997. continue;
  1998. rq = cpu_rq(i);
  1999. if (*sd_idle && rq->nr_running)
  2000. *sd_idle = 0;
  2001. /* Bias balancing toward cpus of our domain */
  2002. if (local_group) {
  2003. if (idle_cpu(i) && !first_idle_cpu) {
  2004. first_idle_cpu = 1;
  2005. balance_cpu = i;
  2006. }
  2007. load = target_load(i, load_idx);
  2008. } else
  2009. load = source_load(i, load_idx);
  2010. avg_load += load;
  2011. sum_nr_running += rq->nr_running;
  2012. sum_weighted_load += weighted_cpuload(i);
  2013. }
  2014. /*
2015. * The first idle cpu, or the first cpu (busiest) in this sched group,
2016. * is eligible for doing load balancing at this and higher
2017. * domains. In the newly idle case, we allow all the cpus
2018. * to do the newly-idle load balance.
  2019. */
  2020. if (idle != CPU_NEWLY_IDLE && local_group &&
  2021. balance_cpu != this_cpu && balance) {
  2022. *balance = 0;
  2023. goto ret;
  2024. }
  2025. total_load += avg_load;
  2026. total_pwr += group->__cpu_power;
  2027. /* Adjust by relative CPU power of the group */
  2028. avg_load = sg_div_cpu_power(group,
  2029. avg_load * SCHED_LOAD_SCALE);
  2030. group_capacity = group->__cpu_power / SCHED_LOAD_SCALE;
  2031. if (local_group) {
  2032. this_load = avg_load;
  2033. this = group;
  2034. this_nr_running = sum_nr_running;
  2035. this_load_per_task = sum_weighted_load;
  2036. } else if (avg_load > max_load &&
  2037. sum_nr_running > group_capacity) {
  2038. max_load = avg_load;
  2039. busiest = group;
  2040. busiest_nr_running = sum_nr_running;
  2041. busiest_load_per_task = sum_weighted_load;
  2042. }
  2043. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
  2044. /*
  2045. * Busy processors will not participate in power savings
  2046. * balance.
  2047. */
  2048. if (idle == CPU_NOT_IDLE ||
  2049. !(sd->flags & SD_POWERSAVINGS_BALANCE))
  2050. goto group_next;
  2051. /*
  2052. * If the local group is idle or completely loaded
  2053. * no need to do power savings balance at this domain
  2054. */
  2055. if (local_group && (this_nr_running >= group_capacity ||
  2056. !this_nr_running))
  2057. power_savings_balance = 0;
  2058. /*
  2059. * If a group is already running at full capacity or idle,
  2060. * don't include that group in power savings calculations
  2061. */
  2062. if (!power_savings_balance || sum_nr_running >= group_capacity
  2063. || !sum_nr_running)
  2064. goto group_next;
  2065. /*
  2066. * Calculate the group which has the least non-idle load.
  2067. * This is the group from where we need to pick up the load
  2068. * for saving power
  2069. */
  2070. if ((sum_nr_running < min_nr_running) ||
  2071. (sum_nr_running == min_nr_running &&
  2072. first_cpu(group->cpumask) <
  2073. first_cpu(group_min->cpumask))) {
  2074. group_min = group;
  2075. min_nr_running = sum_nr_running;
  2076. min_load_per_task = sum_weighted_load /
  2077. sum_nr_running;
  2078. }
  2079. /*
  2080. * Calculate the group which is almost near its
  2081. * capacity but still has some space to pick up some load
  2082. * from other group and save more power
  2083. */
  2084. if (sum_nr_running <= group_capacity - 1) {
  2085. if (sum_nr_running > leader_nr_running ||
  2086. (sum_nr_running == leader_nr_running &&
  2087. first_cpu(group->cpumask) >
  2088. first_cpu(group_leader->cpumask))) {
  2089. group_leader = group;
  2090. leader_nr_running = sum_nr_running;
  2091. }
  2092. }
  2093. group_next:
  2094. #endif
  2095. group = group->next;
  2096. } while (group != sd->groups);
  2097. if (!busiest || this_load >= max_load || busiest_nr_running == 0)
  2098. goto out_balanced;
  2099. avg_load = (SCHED_LOAD_SCALE * total_load) / total_pwr;
  2100. if (this_load >= avg_load ||
  2101. 100*max_load <= sd->imbalance_pct*this_load)
  2102. goto out_balanced;
  2103. busiest_load_per_task /= busiest_nr_running;
  2104. /*
  2105. * We're trying to get all the cpus to the average_load, so we don't
  2106. * want to push ourselves above the average load, nor do we wish to
  2107. * reduce the max loaded cpu below the average load, as either of these
  2108. * actions would just result in more rebalancing later, and ping-pong
  2109. * tasks around. Thus we look for the minimum possible imbalance.
  2110. * Negative imbalances (*we* are more loaded than anyone else) will
  2111. * be counted as no imbalance for these purposes -- we can't fix that
  2112. * by pulling tasks to us. Be careful of negative numbers as they'll
  2113. * appear as very large values with unsigned longs.
  2114. */
  2115. if (max_load <= busiest_load_per_task)
  2116. goto out_balanced;
  2117. /*
  2118. * In the presence of smp nice balancing, certain scenarios can have
2119. * max load less than avg load (as we skip the groups at or below
2120. * their cpu_power while calculating max_load).
  2121. */
  2122. if (max_load < avg_load) {
  2123. *imbalance = 0;
  2124. goto small_imbalance;
  2125. }
  2126. /* Don't want to pull so many tasks that a group would go idle */
  2127. max_pull = min(max_load - avg_load, max_load - busiest_load_per_task);
  2128. /* How much load to actually move to equalise the imbalance */
  2129. *imbalance = min(max_pull * busiest->__cpu_power,
  2130. (avg_load - this_load) * this->__cpu_power)
  2131. / SCHED_LOAD_SCALE;
  2132. /*
2133. * If *imbalance is less than the average load per runnable task,
2134. * there is no guarantee that any tasks will be moved, so we may
2135. * bump its value to force at least one task to be
2136. * moved.
  2137. */
  2138. if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task/2) {
  2139. unsigned long tmp, pwr_now, pwr_move;
  2140. unsigned int imbn;
  2141. small_imbalance:
  2142. pwr_move = pwr_now = 0;
  2143. imbn = 2;
  2144. if (this_nr_running) {
  2145. this_load_per_task /= this_nr_running;
  2146. if (busiest_load_per_task > this_load_per_task)
  2147. imbn = 1;
  2148. } else
  2149. this_load_per_task = SCHED_LOAD_SCALE;
  2150. if (max_load - this_load + SCHED_LOAD_SCALE_FUZZ >=
  2151. busiest_load_per_task * imbn) {
  2152. *imbalance = busiest_load_per_task;
  2153. return busiest;
  2154. }
  2155. /*
  2156. * OK, we don't have enough imbalance to justify moving tasks,
  2157. * however we may be able to increase total CPU power used by
  2158. * moving them.
  2159. */
  2160. pwr_now += busiest->__cpu_power *
  2161. min(busiest_load_per_task, max_load);
  2162. pwr_now += this->__cpu_power *
  2163. min(this_load_per_task, this_load);
  2164. pwr_now /= SCHED_LOAD_SCALE;
  2165. /* Amount of load we'd subtract */
  2166. tmp = sg_div_cpu_power(busiest,
  2167. busiest_load_per_task * SCHED_LOAD_SCALE);
  2168. if (max_load > tmp)
  2169. pwr_move += busiest->__cpu_power *
  2170. min(busiest_load_per_task, max_load - tmp);
  2171. /* Amount of load we'd add */
  2172. if (max_load * busiest->__cpu_power <
  2173. busiest_load_per_task * SCHED_LOAD_SCALE)
  2174. tmp = sg_div_cpu_power(this,
  2175. max_load * busiest->__cpu_power);
  2176. else
  2177. tmp = sg_div_cpu_power(this,
  2178. busiest_load_per_task * SCHED_LOAD_SCALE);
  2179. pwr_move += this->__cpu_power *
  2180. min(this_load_per_task, this_load + tmp);
  2181. pwr_move /= SCHED_LOAD_SCALE;
  2182. /* Move if we gain throughput */
  2183. if (pwr_move <= pwr_now)
  2184. goto out_balanced;
  2185. *imbalance = busiest_load_per_task;
  2186. }
  2187. return busiest;
  2188. out_balanced:
  2189. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
  2190. if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
  2191. goto ret;
  2192. if (this == group_leader && group_leader != group_min) {
  2193. *imbalance = min_load_per_task;
  2194. return group_min;
  2195. }
  2196. #endif
  2197. ret:
  2198. *imbalance = 0;
  2199. return NULL;
  2200. }
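/*
 * Worked example of the imbalance computation (assuming two groups of
 * equal __cpu_power == SCHED_LOAD_SCALE and nice-0 tasks of weight
 * 1024): busiest runs three tasks (max_load == 3072), the local group
 * one (this_load == 1024), so avg_load == 2048. Then max_pull ==
 * min(3072 - 2048, 3072 - 1024) == 1024 and *imbalance == min(1024,
 * 1024) == 1024: moving one nice-0 task leaves both groups at 2048.
 */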
  2201. /*
  2202. * find_busiest_queue - find the busiest runqueue among the cpus in group.
  2203. */
  2204. static struct rq *
  2205. find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
  2206. unsigned long imbalance, cpumask_t *cpus)
  2207. {
  2208. struct rq *busiest = NULL, *rq;
  2209. unsigned long max_load = 0;
  2210. int i;
  2211. for_each_cpu_mask(i, group->cpumask) {
  2212. unsigned long wl;
  2213. if (!cpu_isset(i, *cpus))
  2214. continue;
  2215. rq = cpu_rq(i);
  2216. wl = weighted_cpuload(i);
  2217. if (rq->nr_running == 1 && wl > imbalance)
  2218. continue;
  2219. if (wl > max_load) {
  2220. max_load = wl;
  2221. busiest = rq;
  2222. }
  2223. }
  2224. return busiest;
  2225. }
  2226. /*
2227. * Max backoff if we encounter pinned tasks. A fairly arbitrary value;
2228. * anything works so long as it is large enough.
  2229. */
  2230. #define MAX_PINNED_INTERVAL 512
  2231. static inline unsigned long minus_1_or_zero(unsigned long n)
  2232. {
  2233. return n > 0 ? n - 1 : 0;
  2234. }
  2235. /*
  2236. * Check this_cpu to ensure it is balanced within domain. Attempt to move
  2237. * tasks if there is an imbalance.
  2238. */
  2239. static int load_balance(int this_cpu, struct rq *this_rq,
  2240. struct sched_domain *sd, enum cpu_idle_type idle,
  2241. int *balance)
  2242. {
  2243. int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
  2244. struct sched_group *group;
  2245. unsigned long imbalance;
  2246. struct rq *busiest;
  2247. cpumask_t cpus = CPU_MASK_ALL;
  2248. unsigned long flags;
  2249. /*
2250. * When power savings policy is enabled for the parent domain, an idle
2251. * sibling can pick up load irrespective of the busy siblings. In this case,
2252. * let the state of the idle sibling percolate up as CPU_IDLE, instead of
  2253. * portraying it as CPU_NOT_IDLE.
  2254. */
  2255. if (idle != CPU_NOT_IDLE && sd->flags & SD_SHARE_CPUPOWER &&
  2256. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  2257. sd_idle = 1;
  2258. schedstat_inc(sd, lb_cnt[idle]);
  2259. redo:
  2260. group = find_busiest_group(sd, this_cpu, &imbalance, idle, &sd_idle,
  2261. &cpus, balance);
  2262. if (*balance == 0)
  2263. goto out_balanced;
  2264. if (!group) {
  2265. schedstat_inc(sd, lb_nobusyg[idle]);
  2266. goto out_balanced;
  2267. }
  2268. busiest = find_busiest_queue(group, idle, imbalance, &cpus);
  2269. if (!busiest) {
  2270. schedstat_inc(sd, lb_nobusyq[idle]);
  2271. goto out_balanced;
  2272. }
  2273. BUG_ON(busiest == this_rq);
  2274. schedstat_add(sd, lb_imbalance[idle], imbalance);
  2275. nr_moved = 0;
  2276. if (busiest->nr_running > 1) {
  2277. /*
  2278. * Attempt to move tasks. If find_busiest_group has found
  2279. * an imbalance but busiest->nr_running <= 1, the group is
  2280. * still unbalanced. nr_moved simply stays zero, so it is
  2281. * correctly treated as an imbalance.
  2282. */
  2283. local_irq_save(flags);
  2284. double_rq_lock(this_rq, busiest);
  2285. nr_moved = move_tasks(this_rq, this_cpu, busiest,
  2286. minus_1_or_zero(busiest->nr_running),
  2287. imbalance, sd, idle, &all_pinned);
  2288. double_rq_unlock(this_rq, busiest);
  2289. local_irq_restore(flags);
  2290. /*
  2291. * some other cpu did the load balance for us.
  2292. */
  2293. if (nr_moved && this_cpu != smp_processor_id())
  2294. resched_cpu(this_cpu);
  2295. /* All tasks on this runqueue were pinned by CPU affinity */
  2296. if (unlikely(all_pinned)) {
  2297. cpu_clear(cpu_of(busiest), cpus);
  2298. if (!cpus_empty(cpus))
  2299. goto redo;
  2300. goto out_balanced;
  2301. }
  2302. }
  2303. if (!nr_moved) {
  2304. schedstat_inc(sd, lb_failed[idle]);
  2305. sd->nr_balance_failed++;
  2306. if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
  2307. spin_lock_irqsave(&busiest->lock, flags);
2308. /* don't kick the migration_thread if the curr
2309. * task on the busiest cpu can't be moved to this_cpu
  2310. */
  2311. if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
  2312. spin_unlock_irqrestore(&busiest->lock, flags);
  2313. all_pinned = 1;
  2314. goto out_one_pinned;
  2315. }
  2316. if (!busiest->active_balance) {
  2317. busiest->active_balance = 1;
  2318. busiest->push_cpu = this_cpu;
  2319. active_balance = 1;
  2320. }
  2321. spin_unlock_irqrestore(&busiest->lock, flags);
  2322. if (active_balance)
  2323. wake_up_process(busiest->migration_thread);
  2324. /*
  2325. * We've kicked active balancing, reset the failure
  2326. * counter.
  2327. */
  2328. sd->nr_balance_failed = sd->cache_nice_tries+1;
  2329. }
  2330. } else
  2331. sd->nr_balance_failed = 0;
  2332. if (likely(!active_balance)) {
  2333. /* We were unbalanced, so reset the balancing interval */
  2334. sd->balance_interval = sd->min_interval;
  2335. } else {
  2336. /*
  2337. * If we've begun active balancing, start to back off. This
  2338. * case may not be covered by the all_pinned logic if there
  2339. * is only 1 task on the busy runqueue (because we don't call
  2340. * move_tasks).
  2341. */
  2342. if (sd->balance_interval < sd->max_interval)
  2343. sd->balance_interval *= 2;
  2344. }
  2345. if (!nr_moved && !sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  2346. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  2347. return -1;
  2348. return nr_moved;
  2349. out_balanced:
  2350. schedstat_inc(sd, lb_balanced[idle]);
  2351. sd->nr_balance_failed = 0;
  2352. out_one_pinned:
  2353. /* tune up the balancing interval */
  2354. if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
  2355. (sd->balance_interval < sd->max_interval))
  2356. sd->balance_interval *= 2;
  2357. if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  2358. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  2359. return -1;
  2360. return 0;
  2361. }
  2362. /*
  2363. * Check this_cpu to ensure it is balanced within domain. Attempt to move
  2364. * tasks if there is an imbalance.
  2365. *
  2366. * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
  2367. * this_rq is locked.
  2368. */
  2369. static int
  2370. load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
  2371. {
  2372. struct sched_group *group;
  2373. struct rq *busiest = NULL;
  2374. unsigned long imbalance;
  2375. int nr_moved = 0;
  2376. int sd_idle = 0;
  2377. int all_pinned = 0;
  2378. cpumask_t cpus = CPU_MASK_ALL;
  2379. /*
2380. * When power savings policy is enabled for the parent domain, an idle
2381. * sibling can pick up load irrespective of the busy siblings. In this case,
2382. * let the state of the idle sibling percolate up as CPU_IDLE, instead of
  2383. * portraying it as CPU_NOT_IDLE.
  2384. */
  2385. if (sd->flags & SD_SHARE_CPUPOWER &&
  2386. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  2387. sd_idle = 1;
  2388. schedstat_inc(sd, lb_cnt[CPU_NEWLY_IDLE]);
  2389. redo:
  2390. group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
  2391. &sd_idle, &cpus, NULL);
  2392. if (!group) {
  2393. schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
  2394. goto out_balanced;
  2395. }
  2396. busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance,
  2397. &cpus);
  2398. if (!busiest) {
  2399. schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
  2400. goto out_balanced;
  2401. }
  2402. BUG_ON(busiest == this_rq);
  2403. schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
  2404. nr_moved = 0;
  2405. if (busiest->nr_running > 1) {
  2406. /* Attempt to move tasks */
  2407. double_lock_balance(this_rq, busiest);
  2408. nr_moved = move_tasks(this_rq, this_cpu, busiest,
  2409. minus_1_or_zero(busiest->nr_running),
  2410. imbalance, sd, CPU_NEWLY_IDLE,
  2411. &all_pinned);
  2412. spin_unlock(&busiest->lock);
  2413. if (unlikely(all_pinned)) {
  2414. cpu_clear(cpu_of(busiest), cpus);
  2415. if (!cpus_empty(cpus))
  2416. goto redo;
  2417. }
  2418. }
  2419. if (!nr_moved) {
  2420. schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
  2421. if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  2422. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  2423. return -1;
  2424. } else
  2425. sd->nr_balance_failed = 0;
  2426. return nr_moved;
  2427. out_balanced:
  2428. schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
  2429. if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
  2430. !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
  2431. return -1;
  2432. sd->nr_balance_failed = 0;
  2433. return 0;
  2434. }
  2435. /*
  2436. * idle_balance is called by schedule() if this_cpu is about to become
  2437. * idle. Attempts to pull tasks from other CPUs.
  2438. */
  2439. static void idle_balance(int this_cpu, struct rq *this_rq)
  2440. {
  2441. struct sched_domain *sd;
  2442. int pulled_task = -1;
  2443. unsigned long next_balance = jiffies + HZ;
  2444. for_each_domain(this_cpu, sd) {
  2445. unsigned long interval;
  2446. if (!(sd->flags & SD_LOAD_BALANCE))
  2447. continue;
  2448. if (sd->flags & SD_BALANCE_NEWIDLE)
  2449. /* If we've pulled tasks over stop searching: */
  2450. pulled_task = load_balance_newidle(this_cpu,
  2451. this_rq, sd);
  2452. interval = msecs_to_jiffies(sd->balance_interval);
  2453. if (time_after(next_balance, sd->last_balance + interval))
  2454. next_balance = sd->last_balance + interval;
  2455. if (pulled_task)
  2456. break;
  2457. }
  2458. if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
  2459. /*
  2460. * We are going idle. next_balance may be set based on
  2461. * a busy processor. So reset next_balance.
  2462. */
  2463. this_rq->next_balance = next_balance;
  2464. }
  2465. }
  2466. /*
  2467. * active_load_balance is run by migration threads. It pushes running tasks
  2468. * off the busiest CPU onto idle CPUs. It requires at least 1 task to be
  2469. * running on each physical CPU where possible, and avoids physical /
  2470. * logical imbalances.
  2471. *
  2472. * Called with busiest_rq locked.
  2473. */
  2474. static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
  2475. {
  2476. int target_cpu = busiest_rq->push_cpu;
  2477. struct sched_domain *sd;
  2478. struct rq *target_rq;
  2479. /* Is there any task to move? */
  2480. if (busiest_rq->nr_running <= 1)
  2481. return;
  2482. target_rq = cpu_rq(target_cpu);
  2483. /*
  2484. * This condition is "impossible", if it occurs
  2485. * we need to fix it. Originally reported by
  2486. * Bjorn Helgaas on a 128-cpu setup.
  2487. */
  2488. BUG_ON(busiest_rq == target_rq);
  2489. /* move a task from busiest_rq to target_rq */
  2490. double_lock_balance(busiest_rq, target_rq);
  2491. /* Search for an sd spanning us and the target CPU. */
  2492. for_each_domain(target_cpu, sd) {
  2493. if ((sd->flags & SD_LOAD_BALANCE) &&
  2494. cpu_isset(busiest_cpu, sd->span))
  2495. break;
  2496. }
  2497. if (likely(sd)) {
  2498. schedstat_inc(sd, alb_cnt);
  2499. if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
  2500. RTPRIO_TO_LOAD_WEIGHT(100), sd, CPU_IDLE,
  2501. NULL))
  2502. schedstat_inc(sd, alb_pushed);
  2503. else
  2504. schedstat_inc(sd, alb_failed);
  2505. }
  2506. spin_unlock(&target_rq->lock);
  2507. }
  2508. #ifdef CONFIG_NO_HZ
  2509. static struct {
  2510. atomic_t load_balancer;
  2511. cpumask_t cpu_mask;
  2512. } nohz ____cacheline_aligned = {
  2513. .load_balancer = ATOMIC_INIT(-1),
  2514. .cpu_mask = CPU_MASK_NONE,
  2515. };
  2516. /*
  2517. * This routine will try to nominate the ilb (idle load balancing)
  2518. * owner among the cpus whose ticks are stopped. ilb owner will do the idle
  2519. * load balancing on behalf of all those cpus. If all the cpus in the system
  2520. * go into this tickless mode, then there will be no ilb owner (as there is
  2521. * no need for one) and all the cpus will sleep till the next wakeup event
  2522. * arrives...
  2523. *
2524. * For the ilb owner, the tick is not stopped, and this tick will be used
2525. * for idle load balancing. The ilb owner will still be part of
2526. * nohz.cpu_mask.
  2527. *
  2528. * While stopping the tick, this cpu will become the ilb owner if there
  2529. * is no other owner. And will be the owner till that cpu becomes busy
  2530. * or if all cpus in the system stop their ticks at which point
  2531. * there is no need for ilb owner.
  2532. *
2533. * When the ilb owner becomes busy, it nominates another owner during the
2534. * next busy scheduler_tick().
  2535. */
  2536. int select_nohz_load_balancer(int stop_tick)
  2537. {
  2538. int cpu = smp_processor_id();
  2539. if (stop_tick) {
  2540. cpu_set(cpu, nohz.cpu_mask);
  2541. cpu_rq(cpu)->in_nohz_recently = 1;
  2542. /*
  2543. * If we are going offline and still the leader, give up!
  2544. */
  2545. if (cpu_is_offline(cpu) &&
  2546. atomic_read(&nohz.load_balancer) == cpu) {
  2547. if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
  2548. BUG();
  2549. return 0;
  2550. }
  2551. /* time for ilb owner also to sleep */
  2552. if (cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
  2553. if (atomic_read(&nohz.load_balancer) == cpu)
  2554. atomic_set(&nohz.load_balancer, -1);
  2555. return 0;
  2556. }
  2557. if (atomic_read(&nohz.load_balancer) == -1) {
  2558. /* make me the ilb owner */
  2559. if (atomic_cmpxchg(&nohz.load_balancer, -1, cpu) == -1)
  2560. return 1;
  2561. } else if (atomic_read(&nohz.load_balancer) == cpu)
  2562. return 1;
  2563. } else {
  2564. if (!cpu_isset(cpu, nohz.cpu_mask))
  2565. return 0;
  2566. cpu_clear(cpu, nohz.cpu_mask);
  2567. if (atomic_read(&nohz.load_balancer) == cpu)
  2568. if (atomic_cmpxchg(&nohz.load_balancer, cpu, -1) != cpu)
  2569. BUG();
  2570. }
  2571. return 0;
  2572. }
  2573. #endif
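/*
 * Example (illustrative sketch, not part of this file): the "claim the slot
 * if it is vacant" election that select_nohz_load_balancer() implements with
 * atomic_cmpxchg() above, shown in isolation with C11 atomics. All names in
 * this sketch are made up for illustration.
 */
#if 0	/* illustrative sketch only */
#include <stdatomic.h>

static atomic_int ilb_owner = -1;	/* -1 means "no idle-load-balance owner" */

static int try_become_owner(int cpu)
{
        int expected = -1;

        /* Exactly one contender can win the swap from -1 to its own cpu id. */
        return atomic_compare_exchange_strong(&ilb_owner, &expected, cpu);
}

static void release_ownership(int cpu)
{
        int expected = cpu;

        /* Only the current owner may put the slot back to "vacant". */
        atomic_compare_exchange_strong(&ilb_owner, &expected, -1);
}
#endif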
  2574. static DEFINE_SPINLOCK(balancing);
  2575. /*
  2576. * It checks each scheduling domain to see if it is due to be balanced,
  2577. * and initiates a balancing operation if so.
  2578. *
  2579. * Balancing parameters are set up in arch_init_sched_domains.
  2580. */
  2581. static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
  2582. {
  2583. int balance = 1;
  2584. struct rq *rq = cpu_rq(cpu);
  2585. unsigned long interval;
  2586. struct sched_domain *sd;
  2587. /* Earliest time when we have to do rebalance again */
  2588. unsigned long next_balance = jiffies + 60*HZ;
  2589. for_each_domain(cpu, sd) {
  2590. if (!(sd->flags & SD_LOAD_BALANCE))
  2591. continue;
  2592. interval = sd->balance_interval;
  2593. if (idle != CPU_IDLE)
  2594. interval *= sd->busy_factor;
  2595. /* scale ms to jiffies */
  2596. interval = msecs_to_jiffies(interval);
  2597. if (unlikely(!interval))
  2598. interval = 1;
  2599. if (interval > HZ*NR_CPUS/10)
  2600. interval = HZ*NR_CPUS/10;
  2601. if (sd->flags & SD_SERIALIZE) {
  2602. if (!spin_trylock(&balancing))
  2603. goto out;
  2604. }
  2605. if (time_after_eq(jiffies, sd->last_balance + interval)) {
  2606. if (load_balance(cpu, rq, sd, idle, &balance)) {
  2607. /*
  2608. * We've pulled tasks over so either we're no
  2609. * longer idle, or one of our SMT siblings is
  2610. * not idle.
  2611. */
  2612. idle = CPU_NOT_IDLE;
  2613. }
  2614. sd->last_balance = jiffies;
  2615. }
  2616. if (sd->flags & SD_SERIALIZE)
  2617. spin_unlock(&balancing);
  2618. out:
  2619. if (time_after(next_balance, sd->last_balance + interval))
  2620. next_balance = sd->last_balance + interval;
  2621. /*
  2622. * Stop the load balance at this level. There is another
  2623. * CPU in our sched group which is doing load balancing more
  2624. * actively.
  2625. */
  2626. if (!balance)
  2627. break;
  2628. }
  2629. rq->next_balance = next_balance;
  2630. }
  2631. /*
  2632. * run_rebalance_domains is triggered when needed from the scheduler tick.
2633. * In the CONFIG_NO_HZ case, the idle load balance owner will do the
  2634. * rebalancing for all the cpus for whom scheduler ticks are stopped.
  2635. */
  2636. static void run_rebalance_domains(struct softirq_action *h)
  2637. {
  2638. int this_cpu = smp_processor_id();
  2639. struct rq *this_rq = cpu_rq(this_cpu);
  2640. enum cpu_idle_type idle = this_rq->idle_at_tick ?
  2641. CPU_IDLE : CPU_NOT_IDLE;
  2642. rebalance_domains(this_cpu, idle);
  2643. #ifdef CONFIG_NO_HZ
  2644. /*
  2645. * If this cpu is the owner for idle load balancing, then do the
  2646. * balancing on behalf of the other idle cpus whose ticks are
  2647. * stopped.
  2648. */
  2649. if (this_rq->idle_at_tick &&
  2650. atomic_read(&nohz.load_balancer) == this_cpu) {
  2651. cpumask_t cpus = nohz.cpu_mask;
  2652. struct rq *rq;
  2653. int balance_cpu;
  2654. cpu_clear(this_cpu, cpus);
  2655. for_each_cpu_mask(balance_cpu, cpus) {
  2656. /*
  2657. * If this cpu gets work to do, stop the load balancing
  2658. * work being done for other cpus. Next load
  2659. * balancing owner will pick it up.
  2660. */
  2661. if (need_resched())
  2662. break;
2663. rebalance_domains(balance_cpu, CPU_IDLE);
  2664. rq = cpu_rq(balance_cpu);
  2665. if (time_after(this_rq->next_balance, rq->next_balance))
  2666. this_rq->next_balance = rq->next_balance;
  2667. }
  2668. }
  2669. #endif
  2670. }
  2671. /*
  2672. * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
  2673. *
  2674. * In case of CONFIG_NO_HZ, this is the place where we nominate a new
  2675. * idle load balancing owner or decide to stop the periodic load balancing,
  2676. * if the whole system is idle.
  2677. */
  2678. static inline void trigger_load_balance(struct rq *rq, int cpu)
  2679. {
  2680. #ifdef CONFIG_NO_HZ
  2681. /*
  2682. * If we were in the nohz mode recently and busy at the current
  2683. * scheduler tick, then check if we need to nominate new idle
  2684. * load balancer.
  2685. */
  2686. if (rq->in_nohz_recently && !rq->idle_at_tick) {
  2687. rq->in_nohz_recently = 0;
  2688. if (atomic_read(&nohz.load_balancer) == cpu) {
  2689. cpu_clear(cpu, nohz.cpu_mask);
  2690. atomic_set(&nohz.load_balancer, -1);
  2691. }
  2692. if (atomic_read(&nohz.load_balancer) == -1) {
  2693. /*
  2694. * simple selection for now: Nominate the
  2695. * first cpu in the nohz list to be the next
  2696. * ilb owner.
  2697. *
  2698. * TBD: Traverse the sched domains and nominate
  2699. * the nearest cpu in the nohz.cpu_mask.
  2700. */
  2701. int ilb = first_cpu(nohz.cpu_mask);
  2702. if (ilb != NR_CPUS)
  2703. resched_cpu(ilb);
  2704. }
  2705. }
  2706. /*
  2707. * If this cpu is idle and doing idle load balancing for all the
  2708. * cpus with ticks stopped, is it time for that to stop?
  2709. */
  2710. if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) == cpu &&
  2711. cpus_weight(nohz.cpu_mask) == num_online_cpus()) {
  2712. resched_cpu(cpu);
  2713. return;
  2714. }
  2715. /*
  2716. * If this cpu is idle and the idle load balancing is done by
2717. * someone else, then there is no need to raise the SCHED_SOFTIRQ.
  2718. */
  2719. if (rq->idle_at_tick && atomic_read(&nohz.load_balancer) != cpu &&
  2720. cpu_isset(cpu, nohz.cpu_mask))
  2721. return;
  2722. #endif
  2723. if (time_after_eq(jiffies, rq->next_balance))
  2724. raise_softirq(SCHED_SOFTIRQ);
  2725. }
  2726. #else /* CONFIG_SMP */
  2727. /*
  2728. * on UP we do not need to balance between CPUs:
  2729. */
  2730. static inline void idle_balance(int cpu, struct rq *rq)
  2731. {
  2732. }
  2733. /* Avoid "used but not defined" warning on UP */
  2734. static int balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
  2735. unsigned long max_nr_move, unsigned long max_load_move,
  2736. struct sched_domain *sd, enum cpu_idle_type idle,
  2737. int *all_pinned, unsigned long *load_moved,
  2738. int this_best_prio, int best_prio, int best_prio_seen,
  2739. struct rq_iterator *iterator)
  2740. {
  2741. *load_moved = 0;
  2742. return 0;
  2743. }
  2744. #endif
  2745. DEFINE_PER_CPU(struct kernel_stat, kstat);
  2746. EXPORT_PER_CPU_SYMBOL(kstat);
  2747. /*
  2748. * Return p->sum_exec_runtime plus any more ns on the sched_clock
  2749. * that have not yet been banked in case the task is currently running.
  2750. */
  2751. unsigned long long task_sched_runtime(struct task_struct *p)
  2752. {
  2753. unsigned long flags;
  2754. u64 ns, delta_exec;
  2755. struct rq *rq;
  2756. rq = task_rq_lock(p, &flags);
  2757. ns = p->se.sum_exec_runtime;
  2758. if (rq->curr == p) {
  2759. delta_exec = rq_clock(rq) - p->se.exec_start;
  2760. if ((s64)delta_exec > 0)
  2761. ns += delta_exec;
  2762. }
  2763. task_rq_unlock(rq, &flags);
  2764. return ns;
  2765. }
  2766. /*
  2767. * Account user cpu time to a process.
  2768. * @p: the process that the cpu time gets accounted to
  2769. * @hardirq_offset: the offset to subtract from hardirq_count()
  2770. * @cputime: the cpu time spent in user space since the last update
  2771. */
  2772. void account_user_time(struct task_struct *p, cputime_t cputime)
  2773. {
  2774. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  2775. cputime64_t tmp;
  2776. p->utime = cputime_add(p->utime, cputime);
  2777. /* Add user time to cpustat. */
  2778. tmp = cputime_to_cputime64(cputime);
  2779. if (TASK_NICE(p) > 0)
  2780. cpustat->nice = cputime64_add(cpustat->nice, tmp);
  2781. else
  2782. cpustat->user = cputime64_add(cpustat->user, tmp);
  2783. }
  2784. /*
  2785. * Account system cpu time to a process.
  2786. * @p: the process that the cpu time gets accounted to
  2787. * @hardirq_offset: the offset to subtract from hardirq_count()
  2788. * @cputime: the cpu time spent in kernel space since the last update
  2789. */
  2790. void account_system_time(struct task_struct *p, int hardirq_offset,
  2791. cputime_t cputime)
  2792. {
  2793. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  2794. struct rq *rq = this_rq();
  2795. cputime64_t tmp;
  2796. p->stime = cputime_add(p->stime, cputime);
  2797. /* Add system time to cpustat. */
  2798. tmp = cputime_to_cputime64(cputime);
  2799. if (hardirq_count() - hardirq_offset)
  2800. cpustat->irq = cputime64_add(cpustat->irq, tmp);
  2801. else if (softirq_count())
  2802. cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
  2803. else if (p != rq->idle)
  2804. cpustat->system = cputime64_add(cpustat->system, tmp);
  2805. else if (atomic_read(&rq->nr_iowait) > 0)
  2806. cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
  2807. else
  2808. cpustat->idle = cputime64_add(cpustat->idle, tmp);
  2809. /* Account for system time used */
  2810. acct_update_integrals(p);
  2811. }
  2812. /*
  2813. * Account for involuntary wait time.
  2814. * @p: the process from which the cpu time has been stolen
  2815. * @steal: the cpu time spent in involuntary wait
  2816. */
  2817. void account_steal_time(struct task_struct *p, cputime_t steal)
  2818. {
  2819. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  2820. cputime64_t tmp = cputime_to_cputime64(steal);
  2821. struct rq *rq = this_rq();
  2822. if (p == rq->idle) {
  2823. p->stime = cputime_add(p->stime, steal);
  2824. if (atomic_read(&rq->nr_iowait) > 0)
  2825. cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
  2826. else
  2827. cpustat->idle = cputime64_add(cpustat->idle, tmp);
  2828. } else
  2829. cpustat->steal = cputime64_add(cpustat->steal, tmp);
  2830. }
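/*
 * Example (illustrative sketch, not part of this file): how a per-tick
 * caller such as the update_process_times() path is expected to feed the
 * accounting helpers above, charging one jiffy either as user time or as
 * system/irq time. The wrapper name is made up for illustration.
 */
#if 0	/* illustrative sketch only */
static void demo_account_tick(struct task_struct *p, int user_tick)
{
        cputime_t one_jiffy = jiffies_to_cputime(1);

        if (user_tick)
                account_user_time(p, one_jiffy);
        else
                account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
}
#endif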
  2831. /*
  2832. * This function gets called by the timer code, with HZ frequency.
  2833. * We call it with interrupts disabled.
  2834. *
  2835. * It also gets called by the fork code, when changing the parent's
  2836. * timeslices.
  2837. */
  2838. void scheduler_tick(void)
  2839. {
  2840. int cpu = smp_processor_id();
  2841. struct rq *rq = cpu_rq(cpu);
  2842. struct task_struct *curr = rq->curr;
  2843. spin_lock(&rq->lock);
  2844. if (curr != rq->idle) /* FIXME: needed? */
  2845. curr->sched_class->task_tick(rq, curr);
  2846. update_cpu_load(rq);
  2847. spin_unlock(&rq->lock);
  2848. #ifdef CONFIG_SMP
  2849. rq->idle_at_tick = idle_cpu(cpu);
  2850. trigger_load_balance(rq, cpu);
  2851. #endif
  2852. }
  2853. #if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
  2854. void fastcall add_preempt_count(int val)
  2855. {
  2856. /*
  2857. * Underflow?
  2858. */
  2859. if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
  2860. return;
  2861. preempt_count() += val;
  2862. /*
  2863. * Spinlock count overflowing soon?
  2864. */
  2865. DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
  2866. PREEMPT_MASK - 10);
  2867. }
  2868. EXPORT_SYMBOL(add_preempt_count);
  2869. void fastcall sub_preempt_count(int val)
  2870. {
  2871. /*
  2872. * Underflow?
  2873. */
  2874. if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
  2875. return;
  2876. /*
  2877. * Is the spinlock portion underflowing?
  2878. */
  2879. if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
  2880. !(preempt_count() & PREEMPT_MASK)))
  2881. return;
  2882. preempt_count() -= val;
  2883. }
  2884. EXPORT_SYMBOL(sub_preempt_count);
  2885. #endif
  2886. /*
  2887. * Print scheduling while atomic bug:
  2888. */
  2889. static noinline void __schedule_bug(struct task_struct *prev)
  2890. {
  2891. printk(KERN_ERR "BUG: scheduling while atomic: %s/0x%08x/%d\n",
  2892. prev->comm, preempt_count(), prev->pid);
  2893. debug_show_held_locks(prev);
  2894. if (irqs_disabled())
  2895. print_irqtrace_events(prev);
  2896. dump_stack();
  2897. }
  2898. /*
  2899. * Various schedule()-time debugging checks and statistics:
  2900. */
  2901. static inline void schedule_debug(struct task_struct *prev)
  2902. {
  2903. /*
  2904. * Test if we are atomic. Since do_exit() needs to call into
  2905. * schedule() atomically, we ignore that path for now.
  2906. * Otherwise, whine if we are scheduling when we should not be.
  2907. */
  2908. if (unlikely(in_atomic_preempt_off()) && unlikely(!prev->exit_state))
  2909. __schedule_bug(prev);
  2910. profile_hit(SCHED_PROFILING, __builtin_return_address(0));
  2911. schedstat_inc(this_rq(), sched_cnt);
  2912. }
  2913. /*
  2914. * Pick up the highest-prio task:
  2915. */
  2916. static inline struct task_struct *
  2917. pick_next_task(struct rq *rq, struct task_struct *prev, u64 now)
  2918. {
  2919. struct sched_class *class;
  2920. struct task_struct *p;
  2921. /*
  2922. * Optimization: we know that if all tasks are in
  2923. * the fair class we can call that function directly:
  2924. */
  2925. if (likely(rq->nr_running == rq->cfs.nr_running)) {
  2926. p = fair_sched_class.pick_next_task(rq, now);
  2927. if (likely(p))
  2928. return p;
  2929. }
  2930. class = sched_class_highest;
  2931. for ( ; ; ) {
  2932. p = class->pick_next_task(rq, now);
  2933. if (p)
  2934. return p;
  2935. /*
  2936. * Will never be NULL as the idle class always
  2937. * returns a non-NULL p:
  2938. */
  2939. class = class->next;
  2940. }
  2941. }
  2942. /*
  2943. * schedule() is the main scheduler function.
  2944. */
  2945. asmlinkage void __sched schedule(void)
  2946. {
  2947. struct task_struct *prev, *next;
  2948. long *switch_count;
  2949. struct rq *rq;
  2950. u64 now;
  2951. int cpu;
  2952. need_resched:
  2953. preempt_disable();
  2954. cpu = smp_processor_id();
  2955. rq = cpu_rq(cpu);
  2956. rcu_qsctr_inc(cpu);
  2957. prev = rq->curr;
  2958. switch_count = &prev->nivcsw;
  2959. release_kernel_lock(prev);
  2960. need_resched_nonpreemptible:
  2961. schedule_debug(prev);
  2962. spin_lock_irq(&rq->lock);
  2963. clear_tsk_need_resched(prev);
  2964. if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
  2965. if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
  2966. unlikely(signal_pending(prev)))) {
  2967. prev->state = TASK_RUNNING;
  2968. } else {
  2969. deactivate_task(rq, prev, 1);
  2970. }
  2971. switch_count = &prev->nvcsw;
  2972. }
  2973. if (unlikely(!rq->nr_running))
  2974. idle_balance(cpu, rq);
  2975. now = __rq_clock(rq);
  2976. prev->sched_class->put_prev_task(rq, prev, now);
  2977. next = pick_next_task(rq, prev, now);
  2978. sched_info_switch(prev, next);
  2979. if (likely(prev != next)) {
  2980. rq->nr_switches++;
  2981. rq->curr = next;
  2982. ++*switch_count;
  2983. context_switch(rq, prev, next); /* unlocks the rq */
  2984. } else
  2985. spin_unlock_irq(&rq->lock);
  2986. if (unlikely(reacquire_kernel_lock(current) < 0)) {
  2987. cpu = smp_processor_id();
  2988. rq = cpu_rq(cpu);
  2989. goto need_resched_nonpreemptible;
  2990. }
  2991. preempt_enable_no_resched();
  2992. if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
  2993. goto need_resched;
  2994. }
  2995. EXPORT_SYMBOL(schedule);
  2996. #ifdef CONFIG_PREEMPT
  2997. /*
2998. * This is the entry point to schedule() from in-kernel preemption
2999. * off of preempt_enable(). Kernel preemptions off of a return from
3000. * interrupt occur in preempt_schedule_irq() and call schedule() directly.
  3001. */
  3002. asmlinkage void __sched preempt_schedule(void)
  3003. {
  3004. struct thread_info *ti = current_thread_info();
  3005. #ifdef CONFIG_PREEMPT_BKL
  3006. struct task_struct *task = current;
  3007. int saved_lock_depth;
  3008. #endif
  3009. /*
  3010. * If there is a non-zero preempt_count or interrupts are disabled,
  3011. * we do not want to preempt the current task. Just return..
  3012. */
  3013. if (likely(ti->preempt_count || irqs_disabled()))
  3014. return;
  3015. need_resched:
  3016. add_preempt_count(PREEMPT_ACTIVE);
  3017. /*
  3018. * We keep the big kernel semaphore locked, but we
3019. * clear ->lock_depth so that schedule() doesn't
  3020. * auto-release the semaphore:
  3021. */
  3022. #ifdef CONFIG_PREEMPT_BKL
  3023. saved_lock_depth = task->lock_depth;
  3024. task->lock_depth = -1;
  3025. #endif
  3026. schedule();
  3027. #ifdef CONFIG_PREEMPT_BKL
  3028. task->lock_depth = saved_lock_depth;
  3029. #endif
  3030. sub_preempt_count(PREEMPT_ACTIVE);
  3031. /* we could miss a preemption opportunity between schedule and now */
  3032. barrier();
  3033. if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
  3034. goto need_resched;
  3035. }
  3036. EXPORT_SYMBOL(preempt_schedule);
  3037. /*
3038. * This is the entry point to schedule() from kernel preemption
3039. * off of irq context.
3040. * Note that this is called and returns with irqs disabled. This
3041. * protects us against recursive calls from irq context.
  3042. */
  3043. asmlinkage void __sched preempt_schedule_irq(void)
  3044. {
  3045. struct thread_info *ti = current_thread_info();
  3046. #ifdef CONFIG_PREEMPT_BKL
  3047. struct task_struct *task = current;
  3048. int saved_lock_depth;
  3049. #endif
  3050. /* Catch callers which need to be fixed */
  3051. BUG_ON(ti->preempt_count || !irqs_disabled());
  3052. need_resched:
  3053. add_preempt_count(PREEMPT_ACTIVE);
  3054. /*
  3055. * We keep the big kernel semaphore locked, but we
3056. * clear ->lock_depth so that schedule() doesn't
  3057. * auto-release the semaphore:
  3058. */
  3059. #ifdef CONFIG_PREEMPT_BKL
  3060. saved_lock_depth = task->lock_depth;
  3061. task->lock_depth = -1;
  3062. #endif
  3063. local_irq_enable();
  3064. schedule();
  3065. local_irq_disable();
  3066. #ifdef CONFIG_PREEMPT_BKL
  3067. task->lock_depth = saved_lock_depth;
  3068. #endif
  3069. sub_preempt_count(PREEMPT_ACTIVE);
  3070. /* we could miss a preemption opportunity between schedule and now */
  3071. barrier();
  3072. if (unlikely(test_thread_flag(TIF_NEED_RESCHED)))
  3073. goto need_resched;
  3074. }
  3075. #endif /* CONFIG_PREEMPT */
  3076. int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
  3077. void *key)
  3078. {
  3079. return try_to_wake_up(curr->private, mode, sync);
  3080. }
  3081. EXPORT_SYMBOL(default_wake_function);
  3082. /*
  3083. * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
  3084. * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
  3085. * number) then we wake all the non-exclusive tasks and one exclusive task.
  3086. *
  3087. * There are circumstances in which we can try to wake a task which has already
  3088. * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  3089. * zero in this (rare) case, and we handle it by continuing to scan the queue.
  3090. */
  3091. static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  3092. int nr_exclusive, int sync, void *key)
  3093. {
  3094. struct list_head *tmp, *next;
  3095. list_for_each_safe(tmp, next, &q->task_list) {
  3096. wait_queue_t *curr = list_entry(tmp, wait_queue_t, task_list);
  3097. unsigned flags = curr->flags;
  3098. if (curr->func(curr, mode, sync, key) &&
  3099. (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
  3100. break;
  3101. }
  3102. }
  3103. /**
  3104. * __wake_up - wake up threads blocked on a waitqueue.
  3105. * @q: the waitqueue
  3106. * @mode: which threads
  3107. * @nr_exclusive: how many wake-one or wake-many threads to wake up
  3108. * @key: is directly passed to the wakeup function
  3109. */
  3110. void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode,
  3111. int nr_exclusive, void *key)
  3112. {
  3113. unsigned long flags;
  3114. spin_lock_irqsave(&q->lock, flags);
  3115. __wake_up_common(q, mode, nr_exclusive, 0, key);
  3116. spin_unlock_irqrestore(&q->lock, flags);
  3117. }
  3118. EXPORT_SYMBOL(__wake_up);
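/*
 * Example (illustrative sketch, not part of this file): the usual
 * producer/consumer pairing built on these primitives. Most callers use the
 * wait_event_interruptible() helper rather than open-coding wait queue
 * entries; wake_up() below ends up in __wake_up(). The demo_* names are
 * made up for illustration.
 */
#if 0	/* illustrative sketch only */
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
static int demo_data_ready;

static int demo_consumer(void)
{
        /* Sleep interruptibly until the condition becomes true. */
        if (wait_event_interruptible(demo_wq, demo_data_ready))
                return -ERESTARTSYS;
        demo_data_ready = 0;
        return 0;
}

static void demo_producer(void)
{
        demo_data_ready = 1;
        wake_up(&demo_wq);
}
#endif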
  3119. /*
  3120. * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  3121. */
  3122. void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  3123. {
  3124. __wake_up_common(q, mode, 1, 0, NULL);
  3125. }
  3126. /**
  3127. * __wake_up_sync - wake up threads blocked on a waitqueue.
  3128. * @q: the waitqueue
  3129. * @mode: which threads
  3130. * @nr_exclusive: how many wake-one or wake-many threads to wake up
  3131. *
3132. * The sync wakeup differs in that the waker knows that it will schedule
  3133. * away soon, so while the target thread will be woken up, it will not
  3134. * be migrated to another CPU - ie. the two threads are 'synchronized'
  3135. * with each other. This can prevent needless bouncing between CPUs.
  3136. *
  3137. * On UP it can prevent extra preemption.
  3138. */
  3139. void fastcall
  3140. __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
  3141. {
  3142. unsigned long flags;
  3143. int sync = 1;
  3144. if (unlikely(!q))
  3145. return;
  3146. if (unlikely(!nr_exclusive))
  3147. sync = 0;
  3148. spin_lock_irqsave(&q->lock, flags);
  3149. __wake_up_common(q, mode, nr_exclusive, sync, NULL);
  3150. spin_unlock_irqrestore(&q->lock, flags);
  3151. }
  3152. EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
  3153. void fastcall complete(struct completion *x)
  3154. {
  3155. unsigned long flags;
  3156. spin_lock_irqsave(&x->wait.lock, flags);
  3157. x->done++;
  3158. __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
  3159. 1, 0, NULL);
  3160. spin_unlock_irqrestore(&x->wait.lock, flags);
  3161. }
  3162. EXPORT_SYMBOL(complete);
  3163. void fastcall complete_all(struct completion *x)
  3164. {
  3165. unsigned long flags;
  3166. spin_lock_irqsave(&x->wait.lock, flags);
  3167. x->done += UINT_MAX/2;
  3168. __wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
  3169. 0, 0, NULL);
  3170. spin_unlock_irqrestore(&x->wait.lock, flags);
  3171. }
  3172. EXPORT_SYMBOL(complete_all);
  3173. void fastcall __sched wait_for_completion(struct completion *x)
  3174. {
  3175. might_sleep();
  3176. spin_lock_irq(&x->wait.lock);
  3177. if (!x->done) {
  3178. DECLARE_WAITQUEUE(wait, current);
  3179. wait.flags |= WQ_FLAG_EXCLUSIVE;
  3180. __add_wait_queue_tail(&x->wait, &wait);
  3181. do {
  3182. __set_current_state(TASK_UNINTERRUPTIBLE);
  3183. spin_unlock_irq(&x->wait.lock);
  3184. schedule();
  3185. spin_lock_irq(&x->wait.lock);
  3186. } while (!x->done);
  3187. __remove_wait_queue(&x->wait, &wait);
  3188. }
  3189. x->done--;
  3190. spin_unlock_irq(&x->wait.lock);
  3191. }
  3192. EXPORT_SYMBOL(wait_for_completion);
  3193. unsigned long fastcall __sched
  3194. wait_for_completion_timeout(struct completion *x, unsigned long timeout)
  3195. {
  3196. might_sleep();
  3197. spin_lock_irq(&x->wait.lock);
  3198. if (!x->done) {
  3199. DECLARE_WAITQUEUE(wait, current);
  3200. wait.flags |= WQ_FLAG_EXCLUSIVE;
  3201. __add_wait_queue_tail(&x->wait, &wait);
  3202. do {
  3203. __set_current_state(TASK_UNINTERRUPTIBLE);
  3204. spin_unlock_irq(&x->wait.lock);
  3205. timeout = schedule_timeout(timeout);
  3206. spin_lock_irq(&x->wait.lock);
  3207. if (!timeout) {
  3208. __remove_wait_queue(&x->wait, &wait);
  3209. goto out;
  3210. }
  3211. } while (!x->done);
  3212. __remove_wait_queue(&x->wait, &wait);
  3213. }
  3214. x->done--;
  3215. out:
  3216. spin_unlock_irq(&x->wait.lock);
  3217. return timeout;
  3218. }
  3219. EXPORT_SYMBOL(wait_for_completion_timeout);
  3220. int fastcall __sched wait_for_completion_interruptible(struct completion *x)
  3221. {
  3222. int ret = 0;
  3223. might_sleep();
  3224. spin_lock_irq(&x->wait.lock);
  3225. if (!x->done) {
  3226. DECLARE_WAITQUEUE(wait, current);
  3227. wait.flags |= WQ_FLAG_EXCLUSIVE;
  3228. __add_wait_queue_tail(&x->wait, &wait);
  3229. do {
  3230. if (signal_pending(current)) {
  3231. ret = -ERESTARTSYS;
  3232. __remove_wait_queue(&x->wait, &wait);
  3233. goto out;
  3234. }
  3235. __set_current_state(TASK_INTERRUPTIBLE);
  3236. spin_unlock_irq(&x->wait.lock);
  3237. schedule();
  3238. spin_lock_irq(&x->wait.lock);
  3239. } while (!x->done);
  3240. __remove_wait_queue(&x->wait, &wait);
  3241. }
  3242. x->done--;
  3243. out:
  3244. spin_unlock_irq(&x->wait.lock);
  3245. return ret;
  3246. }
  3247. EXPORT_SYMBOL(wait_for_completion_interruptible);
  3248. unsigned long fastcall __sched
  3249. wait_for_completion_interruptible_timeout(struct completion *x,
  3250. unsigned long timeout)
  3251. {
  3252. might_sleep();
  3253. spin_lock_irq(&x->wait.lock);
  3254. if (!x->done) {
  3255. DECLARE_WAITQUEUE(wait, current);
  3256. wait.flags |= WQ_FLAG_EXCLUSIVE;
  3257. __add_wait_queue_tail(&x->wait, &wait);
  3258. do {
  3259. if (signal_pending(current)) {
  3260. timeout = -ERESTARTSYS;
  3261. __remove_wait_queue(&x->wait, &wait);
  3262. goto out;
  3263. }
  3264. __set_current_state(TASK_INTERRUPTIBLE);
  3265. spin_unlock_irq(&x->wait.lock);
  3266. timeout = schedule_timeout(timeout);
  3267. spin_lock_irq(&x->wait.lock);
  3268. if (!timeout) {
  3269. __remove_wait_queue(&x->wait, &wait);
  3270. goto out;
  3271. }
  3272. } while (!x->done);
  3273. __remove_wait_queue(&x->wait, &wait);
  3274. }
  3275. x->done--;
  3276. out:
  3277. spin_unlock_irq(&x->wait.lock);
  3278. return timeout;
  3279. }
  3280. EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
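/*
 * Example (illustrative sketch, not part of this file): the typical
 * completion usage these helpers support - one context initializes and
 * waits with a timeout, another context signals. The demo_request type and
 * function names are made up for illustration.
 */
#if 0	/* illustrative sketch only */
struct demo_request {
        struct completion done;
};

static int demo_submit_and_wait(struct demo_request *req)
{
        init_completion(&req->done);
        /* ... hand req to an interrupt handler or another thread here ... */
        if (!wait_for_completion_timeout(&req->done, HZ))
                return -ETIMEDOUT;	/* no completion within ~1 second */
        return 0;
}

static void demo_complete_request(struct demo_request *req)
{
        complete(&req->done);		/* wakes the waiter above */
}
#endif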
  3281. static inline void
  3282. sleep_on_head(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
  3283. {
  3284. spin_lock_irqsave(&q->lock, *flags);
  3285. __add_wait_queue(q, wait);
  3286. spin_unlock(&q->lock);
  3287. }
  3288. static inline void
  3289. sleep_on_tail(wait_queue_head_t *q, wait_queue_t *wait, unsigned long *flags)
  3290. {
  3291. spin_lock_irq(&q->lock);
  3292. __remove_wait_queue(q, wait);
  3293. spin_unlock_irqrestore(&q->lock, *flags);
  3294. }
  3295. void __sched interruptible_sleep_on(wait_queue_head_t *q)
  3296. {
  3297. unsigned long flags;
  3298. wait_queue_t wait;
  3299. init_waitqueue_entry(&wait, current);
  3300. current->state = TASK_INTERRUPTIBLE;
  3301. sleep_on_head(q, &wait, &flags);
  3302. schedule();
  3303. sleep_on_tail(q, &wait, &flags);
  3304. }
  3305. EXPORT_SYMBOL(interruptible_sleep_on);
  3306. long __sched
  3307. interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
  3308. {
  3309. unsigned long flags;
  3310. wait_queue_t wait;
  3311. init_waitqueue_entry(&wait, current);
  3312. current->state = TASK_INTERRUPTIBLE;
  3313. sleep_on_head(q, &wait, &flags);
  3314. timeout = schedule_timeout(timeout);
  3315. sleep_on_tail(q, &wait, &flags);
  3316. return timeout;
  3317. }
  3318. EXPORT_SYMBOL(interruptible_sleep_on_timeout);
  3319. void __sched sleep_on(wait_queue_head_t *q)
  3320. {
  3321. unsigned long flags;
  3322. wait_queue_t wait;
  3323. init_waitqueue_entry(&wait, current);
  3324. current->state = TASK_UNINTERRUPTIBLE;
  3325. sleep_on_head(q, &wait, &flags);
  3326. schedule();
  3327. sleep_on_tail(q, &wait, &flags);
  3328. }
  3329. EXPORT_SYMBOL(sleep_on);
  3330. long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
  3331. {
  3332. unsigned long flags;
  3333. wait_queue_t wait;
  3334. init_waitqueue_entry(&wait, current);
  3335. current->state = TASK_UNINTERRUPTIBLE;
  3336. sleep_on_head(q, &wait, &flags);
  3337. timeout = schedule_timeout(timeout);
  3338. sleep_on_tail(q, &wait, &flags);
  3339. return timeout;
  3340. }
  3341. EXPORT_SYMBOL(sleep_on_timeout);
  3342. #ifdef CONFIG_RT_MUTEXES
  3343. /*
  3344. * rt_mutex_setprio - set the current priority of a task
  3345. * @p: task
  3346. * @prio: prio value (kernel-internal form)
  3347. *
  3348. * This function changes the 'effective' priority of a task. It does
  3349. * not touch ->normal_prio like __setscheduler().
  3350. *
  3351. * Used by the rt_mutex code to implement priority inheritance logic.
  3352. */
  3353. void rt_mutex_setprio(struct task_struct *p, int prio)
  3354. {
  3355. unsigned long flags;
  3356. int oldprio, on_rq;
  3357. struct rq *rq;
  3358. u64 now;
  3359. BUG_ON(prio < 0 || prio > MAX_PRIO);
  3360. rq = task_rq_lock(p, &flags);
  3361. now = rq_clock(rq);
  3362. oldprio = p->prio;
  3363. on_rq = p->se.on_rq;
  3364. if (on_rq)
  3365. dequeue_task(rq, p, 0, now);
  3366. if (rt_prio(prio))
  3367. p->sched_class = &rt_sched_class;
  3368. else
  3369. p->sched_class = &fair_sched_class;
  3370. p->prio = prio;
  3371. if (on_rq) {
  3372. enqueue_task(rq, p, 0, now);
  3373. /*
  3374. * Reschedule if we are currently running on this runqueue and
  3375. * our priority decreased, or if we are not currently running on
  3376. * this runqueue and our priority is higher than the current's
  3377. */
  3378. if (task_running(rq, p)) {
  3379. if (p->prio > oldprio)
  3380. resched_task(rq->curr);
  3381. } else {
  3382. check_preempt_curr(rq, p);
  3383. }
  3384. }
  3385. task_rq_unlock(rq, &flags);
  3386. }
  3387. #endif
  3388. void set_user_nice(struct task_struct *p, long nice)
  3389. {
  3390. int old_prio, delta, on_rq;
  3391. unsigned long flags;
  3392. struct rq *rq;
  3393. u64 now;
  3394. if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
  3395. return;
  3396. /*
  3397. * We have to be careful, if called from sys_setpriority(),
  3398. * the task might be in the middle of scheduling on another CPU.
  3399. */
  3400. rq = task_rq_lock(p, &flags);
  3401. now = rq_clock(rq);
  3402. /*
  3403. * The RT priorities are set via sched_setscheduler(), but we still
  3404. * allow the 'normal' nice value to be set - but as expected
3405. * it won't have any effect on scheduling until the task is
  3406. * SCHED_FIFO/SCHED_RR:
  3407. */
  3408. if (task_has_rt_policy(p)) {
  3409. p->static_prio = NICE_TO_PRIO(nice);
  3410. goto out_unlock;
  3411. }
  3412. on_rq = p->se.on_rq;
  3413. if (on_rq) {
  3414. dequeue_task(rq, p, 0, now);
  3415. dec_load(rq, p, now);
  3416. }
  3417. p->static_prio = NICE_TO_PRIO(nice);
  3418. set_load_weight(p);
  3419. old_prio = p->prio;
  3420. p->prio = effective_prio(p);
  3421. delta = p->prio - old_prio;
  3422. if (on_rq) {
  3423. enqueue_task(rq, p, 0, now);
  3424. inc_load(rq, p, now);
  3425. /*
  3426. * If the task increased its priority or is running and
  3427. * lowered its priority, then reschedule its CPU:
  3428. */
  3429. if (delta < 0 || (delta > 0 && task_running(rq, p)))
  3430. resched_task(rq->curr);
  3431. }
  3432. out_unlock:
  3433. task_rq_unlock(rq, &flags);
  3434. }
  3435. EXPORT_SYMBOL(set_user_nice);
  3436. /*
  3437. * can_nice - check if a task can reduce its nice value
  3438. * @p: task
  3439. * @nice: nice value
  3440. */
  3441. int can_nice(const struct task_struct *p, const int nice)
  3442. {
  3443. /* convert nice value [19,-20] to rlimit style value [1,40] */
  3444. int nice_rlim = 20 - nice;
  3445. return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
  3446. capable(CAP_SYS_NICE));
  3447. }
  3448. #ifdef __ARCH_WANT_SYS_NICE
  3449. /*
  3450. * sys_nice - change the priority of the current process.
  3451. * @increment: priority increment
  3452. *
  3453. * sys_setpriority is a more generic, but much slower function that
  3454. * does similar things.
  3455. */
  3456. asmlinkage long sys_nice(int increment)
  3457. {
  3458. long nice, retval;
  3459. /*
  3460. * Setpriority might change our priority at the same moment.
  3461. * We don't have to worry. Conceptually one call occurs first
  3462. * and we have a single winner.
  3463. */
  3464. if (increment < -40)
  3465. increment = -40;
  3466. if (increment > 40)
  3467. increment = 40;
  3468. nice = PRIO_TO_NICE(current->static_prio) + increment;
  3469. if (nice < -20)
  3470. nice = -20;
  3471. if (nice > 19)
  3472. nice = 19;
  3473. if (increment < 0 && !can_nice(current, nice))
  3474. return -EPERM;
  3475. retval = security_task_setnice(current, nice);
  3476. if (retval)
  3477. return retval;
  3478. set_user_nice(current, nice);
  3479. return 0;
  3480. }
  3481. #endif
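/*
 * Example (illustrative user-space sketch, not part of this file): lowering
 * one's own priority with setpriority(), which reaches set_user_nice() via
 * sys_setpriority()/sys_nice(). Raising the priority again would be subject
 * to the can_nice() checks above. The function name is made up for
 * illustration.
 */
#if 0	/* illustrative sketch only */
#include <sys/resource.h>

static int demo_demote_self(void)
{
        /* Positive nice values lower priority and need no privilege. */
        return setpriority(PRIO_PROCESS, 0, 10);
}
#endif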
  3482. /**
  3483. * task_prio - return the priority value of a given task.
  3484. * @p: the task in question.
  3485. *
  3486. * This is the priority value as seen by users in /proc.
3487. * RT tasks are offset by -100 (MAX_RT_PRIO), giving values in -100..-1.
3488. * Normal tasks map to 0..39, with nice 0 corresponding to 20.
  3489. */
  3490. int task_prio(const struct task_struct *p)
  3491. {
  3492. return p->prio - MAX_RT_PRIO;
  3493. }
  3494. /**
  3495. * task_nice - return the nice value of a given task.
  3496. * @p: the task in question.
  3497. */
  3498. int task_nice(const struct task_struct *p)
  3499. {
  3500. return TASK_NICE(p);
  3501. }
  3502. EXPORT_SYMBOL_GPL(task_nice);
  3503. /**
  3504. * idle_cpu - is a given cpu idle currently?
  3505. * @cpu: the processor in question.
  3506. */
  3507. int idle_cpu(int cpu)
  3508. {
  3509. return cpu_curr(cpu) == cpu_rq(cpu)->idle;
  3510. }
  3511. /**
  3512. * idle_task - return the idle task for a given cpu.
  3513. * @cpu: the processor in question.
  3514. */
  3515. struct task_struct *idle_task(int cpu)
  3516. {
  3517. return cpu_rq(cpu)->idle;
  3518. }
  3519. /**
  3520. * find_process_by_pid - find a process with a matching PID value.
  3521. * @pid: the pid in question.
  3522. */
  3523. static inline struct task_struct *find_process_by_pid(pid_t pid)
  3524. {
  3525. return pid ? find_task_by_pid(pid) : current;
  3526. }
  3527. /* Actually do priority change: must hold rq lock. */
  3528. static void
  3529. __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
  3530. {
  3531. BUG_ON(p->se.on_rq);
  3532. p->policy = policy;
  3533. switch (p->policy) {
  3534. case SCHED_NORMAL:
  3535. case SCHED_BATCH:
  3536. case SCHED_IDLE:
  3537. p->sched_class = &fair_sched_class;
  3538. break;
  3539. case SCHED_FIFO:
  3540. case SCHED_RR:
  3541. p->sched_class = &rt_sched_class;
  3542. break;
  3543. }
  3544. p->rt_priority = prio;
  3545. p->normal_prio = normal_prio(p);
  3546. /* we are holding p->pi_lock already */
  3547. p->prio = rt_mutex_getprio(p);
  3548. set_load_weight(p);
  3549. }
  3550. /**
  3551. * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
  3552. * @p: the task in question.
  3553. * @policy: new policy.
  3554. * @param: structure containing the new RT priority.
  3555. *
  3556. * NOTE that the task may be already dead.
  3557. */
  3558. int sched_setscheduler(struct task_struct *p, int policy,
  3559. struct sched_param *param)
  3560. {
  3561. int retval, oldprio, oldpolicy = -1, on_rq;
  3562. unsigned long flags;
  3563. struct rq *rq;
  3564. /* may grab non-irq protected spin_locks */
  3565. BUG_ON(in_interrupt());
  3566. recheck:
  3567. /* double check policy once rq lock held */
  3568. if (policy < 0)
  3569. policy = oldpolicy = p->policy;
  3570. else if (policy != SCHED_FIFO && policy != SCHED_RR &&
  3571. policy != SCHED_NORMAL && policy != SCHED_BATCH &&
  3572. policy != SCHED_IDLE)
  3573. return -EINVAL;
  3574. /*
  3575. * Valid priorities for SCHED_FIFO and SCHED_RR are
  3576. * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
  3577. * SCHED_BATCH and SCHED_IDLE is 0.
  3578. */
  3579. if (param->sched_priority < 0 ||
  3580. (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
  3581. (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
  3582. return -EINVAL;
  3583. if (rt_policy(policy) != (param->sched_priority != 0))
  3584. return -EINVAL;
  3585. /*
  3586. * Allow unprivileged RT tasks to decrease priority:
  3587. */
  3588. if (!capable(CAP_SYS_NICE)) {
  3589. if (rt_policy(policy)) {
  3590. unsigned long rlim_rtprio;
  3591. if (!lock_task_sighand(p, &flags))
  3592. return -ESRCH;
  3593. rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
  3594. unlock_task_sighand(p, &flags);
  3595. /* can't set/change the rt policy */
  3596. if (policy != p->policy && !rlim_rtprio)
  3597. return -EPERM;
  3598. /* can't increase priority */
  3599. if (param->sched_priority > p->rt_priority &&
  3600. param->sched_priority > rlim_rtprio)
  3601. return -EPERM;
  3602. }
  3603. /*
3604. * Like positive nice levels, don't allow tasks to
  3605. * move out of SCHED_IDLE either:
  3606. */
  3607. if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
  3608. return -EPERM;
  3609. /* can't change other user's priorities */
  3610. if ((current->euid != p->euid) &&
  3611. (current->euid != p->uid))
  3612. return -EPERM;
  3613. }
  3614. retval = security_task_setscheduler(p, policy, param);
  3615. if (retval)
  3616. return retval;
  3617. /*
  3618. * make sure no PI-waiters arrive (or leave) while we are
  3619. * changing the priority of the task:
  3620. */
  3621. spin_lock_irqsave(&p->pi_lock, flags);
  3622. /*
3623. * To be able to change p->policy safely, the appropriate
  3624. * runqueue lock must be held.
  3625. */
  3626. rq = __task_rq_lock(p);
  3627. /* recheck policy now with rq lock held */
  3628. if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
  3629. policy = oldpolicy = -1;
  3630. __task_rq_unlock(rq);
  3631. spin_unlock_irqrestore(&p->pi_lock, flags);
  3632. goto recheck;
  3633. }
  3634. on_rq = p->se.on_rq;
  3635. if (on_rq)
  3636. deactivate_task(rq, p, 0);
  3637. oldprio = p->prio;
  3638. __setscheduler(rq, p, policy, param->sched_priority);
  3639. if (on_rq) {
  3640. activate_task(rq, p, 0);
  3641. /*
  3642. * Reschedule if we are currently running on this runqueue and
  3643. * our priority decreased, or if we are not currently running on
  3644. * this runqueue and our priority is higher than the current's
  3645. */
  3646. if (task_running(rq, p)) {
  3647. if (p->prio > oldprio)
  3648. resched_task(rq->curr);
  3649. } else {
  3650. check_preempt_curr(rq, p);
  3651. }
  3652. }
  3653. __task_rq_unlock(rq);
  3654. spin_unlock_irqrestore(&p->pi_lock, flags);
  3655. rt_mutex_adjust_pi(p);
  3656. return 0;
  3657. }
  3658. EXPORT_SYMBOL_GPL(sched_setscheduler);
  3659. static int
  3660. do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  3661. {
  3662. struct sched_param lparam;
  3663. struct task_struct *p;
  3664. int retval;
  3665. if (!param || pid < 0)
  3666. return -EINVAL;
  3667. if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
  3668. return -EFAULT;
  3669. rcu_read_lock();
  3670. retval = -ESRCH;
  3671. p = find_process_by_pid(pid);
  3672. if (p != NULL)
  3673. retval = sched_setscheduler(p, policy, &lparam);
  3674. rcu_read_unlock();
  3675. return retval;
  3676. }
  3677. /**
  3678. * sys_sched_setscheduler - set/change the scheduler policy and RT priority
  3679. * @pid: the pid in question.
  3680. * @policy: new policy.
  3681. * @param: structure containing the new RT priority.
  3682. */
  3683. asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
  3684. struct sched_param __user *param)
  3685. {
  3686. /* negative values for policy are not valid */
  3687. if (policy < 0)
  3688. return -EINVAL;
  3689. return do_sched_setscheduler(pid, policy, param);
  3690. }
  3691. /**
  3692. * sys_sched_setparam - set/change the RT priority of a thread
  3693. * @pid: the pid in question.
  3694. * @param: structure containing the new RT priority.
  3695. */
  3696. asmlinkage long sys_sched_setparam(pid_t pid, struct sched_param __user *param)
  3697. {
  3698. return do_sched_setscheduler(pid, -1, param);
  3699. }
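/*
 * Example (illustrative user-space sketch, not part of this file): switching
 * the calling thread to SCHED_FIFO through the syscall defined above. The
 * priority value 1 is arbitrary; without CAP_SYS_NICE or an RLIMIT_RTPRIO
 * grant the call fails with EPERM, as enforced in sched_setscheduler().
 */
#if 0	/* illustrative sketch only */
#include <sched.h>

static int demo_make_self_fifo(void)
{
        struct sched_param sp = { .sched_priority = 1 };

        /* pid 0 means "the calling process". */
        return sched_setscheduler(0, SCHED_FIFO, &sp);
}
#endif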
  3700. /**
  3701. * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  3702. * @pid: the pid in question.
  3703. */
  3704. asmlinkage long sys_sched_getscheduler(pid_t pid)
  3705. {
  3706. struct task_struct *p;
  3707. int retval = -EINVAL;
  3708. if (pid < 0)
  3709. goto out_nounlock;
  3710. retval = -ESRCH;
  3711. read_lock(&tasklist_lock);
  3712. p = find_process_by_pid(pid);
  3713. if (p) {
  3714. retval = security_task_getscheduler(p);
  3715. if (!retval)
  3716. retval = p->policy;
  3717. }
  3718. read_unlock(&tasklist_lock);
  3719. out_nounlock:
  3720. return retval;
  3721. }
  3722. /**
3723. * sys_sched_getparam - get the RT priority of a thread
  3724. * @pid: the pid in question.
  3725. * @param: structure containing the RT priority.
  3726. */
  3727. asmlinkage long sys_sched_getparam(pid_t pid, struct sched_param __user *param)
  3728. {
  3729. struct sched_param lp;
  3730. struct task_struct *p;
  3731. int retval = -EINVAL;
  3732. if (!param || pid < 0)
  3733. goto out_nounlock;
  3734. read_lock(&tasklist_lock);
  3735. p = find_process_by_pid(pid);
  3736. retval = -ESRCH;
  3737. if (!p)
  3738. goto out_unlock;
  3739. retval = security_task_getscheduler(p);
  3740. if (retval)
  3741. goto out_unlock;
  3742. lp.sched_priority = p->rt_priority;
  3743. read_unlock(&tasklist_lock);
  3744. /*
  3745. * This one might sleep, we cannot do it with a spinlock held ...
  3746. */
  3747. retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
  3748. out_nounlock:
  3749. return retval;
  3750. out_unlock:
  3751. read_unlock(&tasklist_lock);
  3752. return retval;
  3753. }
  3754. long sched_setaffinity(pid_t pid, cpumask_t new_mask)
  3755. {
  3756. cpumask_t cpus_allowed;
  3757. struct task_struct *p;
  3758. int retval;
  3759. mutex_lock(&sched_hotcpu_mutex);
  3760. read_lock(&tasklist_lock);
  3761. p = find_process_by_pid(pid);
  3762. if (!p) {
  3763. read_unlock(&tasklist_lock);
  3764. mutex_unlock(&sched_hotcpu_mutex);
  3765. return -ESRCH;
  3766. }
  3767. /*
  3768. * It is not safe to call set_cpus_allowed with the
  3769. * tasklist_lock held. We will bump the task_struct's
  3770. * usage count and then drop tasklist_lock.
  3771. */
  3772. get_task_struct(p);
  3773. read_unlock(&tasklist_lock);
  3774. retval = -EPERM;
  3775. if ((current->euid != p->euid) && (current->euid != p->uid) &&
  3776. !capable(CAP_SYS_NICE))
  3777. goto out_unlock;
  3778. retval = security_task_setscheduler(p, 0, NULL);
  3779. if (retval)
  3780. goto out_unlock;
  3781. cpus_allowed = cpuset_cpus_allowed(p);
  3782. cpus_and(new_mask, new_mask, cpus_allowed);
  3783. retval = set_cpus_allowed(p, new_mask);
  3784. out_unlock:
  3785. put_task_struct(p);
  3786. mutex_unlock(&sched_hotcpu_mutex);
  3787. return retval;
  3788. }
  3789. static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  3790. cpumask_t *new_mask)
  3791. {
  3792. if (len < sizeof(cpumask_t)) {
  3793. memset(new_mask, 0, sizeof(cpumask_t));
  3794. } else if (len > sizeof(cpumask_t)) {
  3795. len = sizeof(cpumask_t);
  3796. }
  3797. return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
  3798. }
  3799. /**
  3800. * sys_sched_setaffinity - set the cpu affinity of a process
  3801. * @pid: pid of the process
  3802. * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  3803. * @user_mask_ptr: user-space pointer to the new cpu mask
  3804. */
  3805. asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
  3806. unsigned long __user *user_mask_ptr)
  3807. {
  3808. cpumask_t new_mask;
  3809. int retval;
  3810. retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
  3811. if (retval)
  3812. return retval;
  3813. return sched_setaffinity(pid, new_mask);
  3814. }
  3815. /*
3816. * Represents all cpus present in the system.
3817. * In systems capable of hotplug, this map could dynamically grow
3818. * as new cpus are detected in the system via any platform-specific
3819. * method, such as ACPI, for example.
  3820. */
  3821. cpumask_t cpu_present_map __read_mostly;
  3822. EXPORT_SYMBOL(cpu_present_map);
  3823. #ifndef CONFIG_SMP
  3824. cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
  3825. EXPORT_SYMBOL(cpu_online_map);
  3826. cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
  3827. EXPORT_SYMBOL(cpu_possible_map);
  3828. #endif
  3829. long sched_getaffinity(pid_t pid, cpumask_t *mask)
  3830. {
  3831. struct task_struct *p;
  3832. int retval;
  3833. mutex_lock(&sched_hotcpu_mutex);
  3834. read_lock(&tasklist_lock);
  3835. retval = -ESRCH;
  3836. p = find_process_by_pid(pid);
  3837. if (!p)
  3838. goto out_unlock;
  3839. retval = security_task_getscheduler(p);
  3840. if (retval)
  3841. goto out_unlock;
  3842. cpus_and(*mask, p->cpus_allowed, cpu_online_map);
  3843. out_unlock:
  3844. read_unlock(&tasklist_lock);
  3845. mutex_unlock(&sched_hotcpu_mutex);
  3846. if (retval)
  3847. return retval;
  3848. return 0;
  3849. }
  3850. /**
  3851. * sys_sched_getaffinity - get the cpu affinity of a process
  3852. * @pid: pid of the process
  3853. * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  3854. * @user_mask_ptr: user-space pointer to hold the current cpu mask
  3855. */
  3856. asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
  3857. unsigned long __user *user_mask_ptr)
  3858. {
  3859. int ret;
  3860. cpumask_t mask;
  3861. if (len < sizeof(cpumask_t))
  3862. return -EINVAL;
  3863. ret = sched_getaffinity(pid, &mask);
  3864. if (ret < 0)
  3865. return ret;
  3866. if (copy_to_user(user_mask_ptr, &mask, sizeof(cpumask_t)))
  3867. return -EFAULT;
  3868. return sizeof(cpumask_t);
  3869. }
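/*
 * Example (illustrative user-space sketch, not part of this file): pinning
 * the caller to CPU 0 through the affinity syscalls above. CPU 0 is an
 * arbitrary choice; the mask is ANDed with the cpuset-allowed mask in
 * sched_setaffinity() above.
 */
#if 0	/* illustrative sketch only */
#define _GNU_SOURCE
#include <sched.h>

static int demo_pin_to_cpu0(void)
{
        cpu_set_t set;

        CPU_ZERO(&set);
        CPU_SET(0, &set);
        /* pid 0 means "the calling thread". */
        return sched_setaffinity(0, sizeof(set), &set);
}
#endif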
  3870. /**
  3871. * sys_sched_yield - yield the current processor to other threads.
  3872. *
  3873. * This function yields the current CPU to other tasks. If there are no
  3874. * other threads running on this CPU then this function will return.
  3875. */
  3876. asmlinkage long sys_sched_yield(void)
  3877. {
  3878. struct rq *rq = this_rq_lock();
  3879. schedstat_inc(rq, yld_cnt);
  3880. if (unlikely(rq->nr_running == 1))
  3881. schedstat_inc(rq, yld_act_empty);
  3882. else
  3883. current->sched_class->yield_task(rq, current);
  3884. /*
  3885. * Since we are going to call schedule() anyway, there's
  3886. * no need to preempt or enable interrupts:
  3887. */
  3888. __release(rq->lock);
  3889. spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  3890. _raw_spin_unlock(&rq->lock);
  3891. preempt_enable_no_resched();
  3892. schedule();
  3893. return 0;
  3894. }
  3895. static void __cond_resched(void)
  3896. {
  3897. #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
  3898. __might_sleep(__FILE__, __LINE__);
  3899. #endif
  3900. /*
  3901. * The BKS might be reacquired before we have dropped
  3902. * PREEMPT_ACTIVE, which could trigger a second
  3903. * cond_resched() call.
  3904. */
  3905. do {
  3906. add_preempt_count(PREEMPT_ACTIVE);
  3907. schedule();
  3908. sub_preempt_count(PREEMPT_ACTIVE);
  3909. } while (need_resched());
  3910. }
  3911. int __sched cond_resched(void)
  3912. {
  3913. if (need_resched() && !(preempt_count() & PREEMPT_ACTIVE) &&
  3914. system_state == SYSTEM_RUNNING) {
  3915. __cond_resched();
  3916. return 1;
  3917. }
  3918. return 0;
  3919. }
  3920. EXPORT_SYMBOL(cond_resched);
  3921. /*
  3922. * cond_resched_lock() - if a reschedule is pending, drop the given lock,
  3923. * call schedule, and on return reacquire the lock.
  3924. *
  3925. * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
  3926. * operations here to prevent schedule() from being called twice (once via
  3927. * spin_unlock(), once by hand).
  3928. */
  3929. int cond_resched_lock(spinlock_t *lock)
  3930. {
  3931. int ret = 0;
  3932. if (need_lockbreak(lock)) {
  3933. spin_unlock(lock);
  3934. cpu_relax();
  3935. ret = 1;
  3936. spin_lock(lock);
  3937. }
  3938. if (need_resched() && system_state == SYSTEM_RUNNING) {
  3939. spin_release(&lock->dep_map, 1, _THIS_IP_);
  3940. _raw_spin_unlock(lock);
  3941. preempt_enable_no_resched();
  3942. __cond_resched();
  3943. ret = 1;
  3944. spin_lock(lock);
  3945. }
  3946. return ret;
  3947. }
  3948. EXPORT_SYMBOL(cond_resched_lock);
  3949. int __sched cond_resched_softirq(void)
  3950. {
  3951. BUG_ON(!in_softirq());
  3952. if (need_resched() && system_state == SYSTEM_RUNNING) {
  3953. local_bh_enable();
  3954. __cond_resched();
  3955. local_bh_disable();
  3956. return 1;
  3957. }
  3958. return 0;
  3959. }
  3960. EXPORT_SYMBOL(cond_resched_softirq);
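/*
 * Example (illustrative sketch, not part of this file): the pattern
 * cond_resched() exists for - breaking up a long, non-atomic kernel loop so
 * that it yields whenever a reschedule is pending. The function below is
 * made up for illustration; clear_highpage() stands in for any per-item work.
 */
#if 0	/* illustrative sketch only */
static void demo_scrub_pages(struct page **pages, int nr)
{
        int i;

        for (i = 0; i < nr; i++) {
                clear_highpage(pages[i]);	/* some non-trivial work */
                cond_resched();			/* voluntary preemption point */
        }
}
#endif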
  3961. /**
  3962. * yield - yield the current processor to other threads.
  3963. *
  3964. * This is a shortcut for kernel-space yielding - it marks the
  3965. * thread runnable and calls sys_sched_yield().
  3966. */
  3967. void __sched yield(void)
  3968. {
  3969. set_current_state(TASK_RUNNING);
  3970. sys_sched_yield();
  3971. }
  3972. EXPORT_SYMBOL(yield);
  3973. /*
  3974. * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  3975. * that process accounting knows that this is a task in IO wait state.
  3976. *
  3977. * But don't do that if it is a deliberate, throttling IO wait (this task
  3978. * has set its backing_dev_info: the queue against which it should throttle)
  3979. */
  3980. void __sched io_schedule(void)
  3981. {
  3982. struct rq *rq = &__raw_get_cpu_var(runqueues);
  3983. delayacct_blkio_start();
  3984. atomic_inc(&rq->nr_iowait);
  3985. schedule();
  3986. atomic_dec(&rq->nr_iowait);
  3987. delayacct_blkio_end();
  3988. }
  3989. EXPORT_SYMBOL(io_schedule);
  3990. long __sched io_schedule_timeout(long timeout)
  3991. {
  3992. struct rq *rq = &__raw_get_cpu_var(runqueues);
  3993. long ret;
  3994. delayacct_blkio_start();
  3995. atomic_inc(&rq->nr_iowait);
  3996. ret = schedule_timeout(timeout);
  3997. atomic_dec(&rq->nr_iowait);
  3998. delayacct_blkio_end();
  3999. return ret;
  4000. }
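/*
 * Example (illustrative sketch, not part of this file): how a block-I/O
 * style waiter would use io_schedule() so that the sleep is charged to
 * iowait via rq->nr_iowait. The flag being polled is made up for
 * illustration.
 */
#if 0	/* illustrative sketch only */
static void demo_wait_for_io(volatile int *io_done)
{
        while (!*io_done) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (*io_done)
                        break;
                io_schedule();
        }
        __set_current_state(TASK_RUNNING);
}
#endif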
  4001. /**
  4002. * sys_sched_get_priority_max - return maximum RT priority.
  4003. * @policy: scheduling class.
  4004. *
  4005. * this syscall returns the maximum rt_priority that can be used
  4006. * by a given scheduling class.
  4007. */
  4008. asmlinkage long sys_sched_get_priority_max(int policy)
  4009. {
  4010. int ret = -EINVAL;
  4011. switch (policy) {
  4012. case SCHED_FIFO:
  4013. case SCHED_RR:
  4014. ret = MAX_USER_RT_PRIO-1;
  4015. break;
  4016. case SCHED_NORMAL:
  4017. case SCHED_BATCH:
  4018. case SCHED_IDLE:
  4019. ret = 0;
  4020. break;
  4021. }
  4022. return ret;
  4023. }
  4024. /**
  4025. * sys_sched_get_priority_min - return minimum RT priority.
  4026. * @policy: scheduling class.
  4027. *
  4028. * this syscall returns the minimum rt_priority that can be used
  4029. * by a given scheduling class.
  4030. */
  4031. asmlinkage long sys_sched_get_priority_min(int policy)
  4032. {
  4033. int ret = -EINVAL;
  4034. switch (policy) {
  4035. case SCHED_FIFO:
  4036. case SCHED_RR:
  4037. ret = 1;
  4038. break;
  4039. case SCHED_NORMAL:
  4040. case SCHED_BATCH:
  4041. case SCHED_IDLE:
  4042. ret = 0;
  4043. }
  4044. return ret;
  4045. }
  4046. /**
  4047. * sys_sched_rr_get_interval - return the default timeslice of a process.
  4048. * @pid: pid of the process.
  4049. * @interval: userspace pointer to the timeslice value.
  4050. *
  4051. * this syscall writes the default timeslice value of a given process
  4052. * into the user-space timespec buffer. A value of '0' means infinity.
  4053. */
  4054. asmlinkage
  4055. long sys_sched_rr_get_interval(pid_t pid, struct timespec __user *interval)
  4056. {
  4057. struct task_struct *p;
  4058. int retval = -EINVAL;
  4059. struct timespec t;
  4060. if (pid < 0)
  4061. goto out_nounlock;
  4062. retval = -ESRCH;
  4063. read_lock(&tasklist_lock);
  4064. p = find_process_by_pid(pid);
  4065. if (!p)
  4066. goto out_unlock;
  4067. retval = security_task_getscheduler(p);
  4068. if (retval)
  4069. goto out_unlock;
  4070. jiffies_to_timespec(p->policy == SCHED_FIFO ?
  4071. 0 : static_prio_timeslice(p->static_prio), &t);
  4072. read_unlock(&tasklist_lock);
  4073. retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
  4074. out_nounlock:
  4075. return retval;
  4076. out_unlock:
  4077. read_unlock(&tasklist_lock);
  4078. return retval;
  4079. }
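/*
 * Example (illustrative user-space sketch, not part of this file): reading
 * the calling process's round-robin timeslice through the syscall above. A
 * reported value of 0 corresponds to SCHED_FIFO ("infinite" timeslice), as
 * noted in the comment above.
 */
#if 0	/* illustrative sketch only */
#include <sched.h>
#include <stdio.h>
#include <time.h>

static void demo_show_rr_interval(void)
{
        struct timespec ts;

        if (sched_rr_get_interval(0, &ts) == 0)
                printf("timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
}
#endif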
  4080. static const char stat_nam[] = "RSDTtZX";
  4081. static void show_task(struct task_struct *p)
  4082. {
  4083. unsigned long free = 0;
  4084. unsigned state;
  4085. state = p->state ? __ffs(p->state) + 1 : 0;
  4086. printk("%-13.13s %c", p->comm,
  4087. state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
  4088. #if BITS_PER_LONG == 32
  4089. if (state == TASK_RUNNING)
  4090. printk(" running ");
  4091. else
  4092. printk(" %08lx ", thread_saved_pc(p));
  4093. #else
  4094. if (state == TASK_RUNNING)
  4095. printk(" running task ");
  4096. else
  4097. printk(" %016lx ", thread_saved_pc(p));
  4098. #endif
  4099. #ifdef CONFIG_DEBUG_STACK_USAGE
  4100. {
  4101. unsigned long *n = end_of_stack(p);
  4102. while (!*n)
  4103. n++;
  4104. free = (unsigned long)n - (unsigned long)end_of_stack(p);
  4105. }
  4106. #endif
  4107. printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid);
  4108. if (state != TASK_RUNNING)
  4109. show_stack(p, NULL);
  4110. }
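/*
 * Dump every thread whose state matches @state_filter (0 means dump them
 * all). The NMI and softlockup watchdogs are poked along the way so that
 * listing many tasks on a slow console does not trigger them.
 */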
  4111. void show_state_filter(unsigned long state_filter)
  4112. {
  4113. struct task_struct *g, *p;
#if BITS_PER_LONG == 32
	printk(KERN_INFO
		"  task                PC stack   pid father\n");
#else
	printk(KERN_INFO
		"  task                        PC stack   pid father\n");
#endif
  4121. read_lock(&tasklist_lock);
  4122. do_each_thread(g, p) {
/*
 * reset the NMI-timeout, listing all tasks on a slow
 * console might take a lot of time:
 */
  4127. touch_nmi_watchdog();
  4128. if (!state_filter || (p->state & state_filter))
  4129. show_task(p);
  4130. } while_each_thread(g, p);
  4131. touch_all_softlockup_watchdogs();
  4132. #ifdef CONFIG_SCHED_DEBUG
  4133. sysrq_sched_debug_show();
  4134. #endif
  4135. read_unlock(&tasklist_lock);
  4136. /*
  4137. * Only show locks if all tasks are dumped:
  4138. */
  4139. if (state_filter == -1)
  4140. debug_show_all_locks();
  4141. }
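/*
 * Switch the boot idle thread over to the idle scheduling class;
 * sched_init() leaves it in the fair class while early bootup still
 * pretends to be a normal task.
 */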
  4142. void __cpuinit init_idle_bootup_task(struct task_struct *idle)
  4143. {
  4144. idle->sched_class = &idle_sched_class;
  4145. }
  4146. /**
  4147. * init_idle - set up an idle thread for a given CPU
  4148. * @idle: task in question
  4149. * @cpu: cpu the idle task belongs to
  4150. *
  4151. * NOTE: this function does not set the idle thread's NEED_RESCHED
  4152. * flag, to make booting more robust.
  4153. */
  4154. void __cpuinit init_idle(struct task_struct *idle, int cpu)
  4155. {
  4156. struct rq *rq = cpu_rq(cpu);
  4157. unsigned long flags;
  4158. __sched_fork(idle);
  4159. idle->se.exec_start = sched_clock();
  4160. idle->prio = idle->normal_prio = MAX_PRIO;
  4161. idle->cpus_allowed = cpumask_of_cpu(cpu);
  4162. __set_task_cpu(idle, cpu);
  4163. spin_lock_irqsave(&rq->lock, flags);
  4164. rq->curr = rq->idle = idle;
  4165. #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
  4166. idle->oncpu = 1;
  4167. #endif
  4168. spin_unlock_irqrestore(&rq->lock, flags);
  4169. /* Set the preempt count _outside_ the spinlocks! */
  4170. #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL)
  4171. task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
  4172. #else
  4173. task_thread_info(idle)->preempt_count = 0;
  4174. #endif
  4175. /*
  4176. * The idle tasks have their own, simple scheduling class:
  4177. */
  4178. idle->sched_class = &idle_sched_class;
  4179. }
/*
 * In a system that switches off the HZ timer, nohz_cpu_mask
 * indicates which cpus entered this state. This is used
 * in the rcu update to wait only for active cpus. For systems
 * which do not switch off the HZ timer, nohz_cpu_mask should
 * always be CPU_MASK_NONE.
 */
  4187. cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
  4188. /*
  4189. * Increase the granularity value when there are more CPUs,
  4190. * because with more CPUs the 'effective latency' as visible
  4191. * to users decreases. But the relationship is not linear,
  4192. * so pick a second-best guess by going with the log2 of the
  4193. * number of CPUs.
  4194. *
  4195. * This idea comes from the SD scheduler of Con Kolivas:
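 *
 * For example, with 8 CPUs online: factor = 1 + ilog2(8) = 4, so the
 * base granularity is scaled by 4 (capped at gran_limit), and the runtime
 * limit and wakeup granularity below are derived from the scaled value.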
  4196. */
  4197. static inline void sched_init_granularity(void)
  4198. {
  4199. unsigned int factor = 1 + ilog2(num_online_cpus());
  4200. const unsigned long gran_limit = 100000000;
  4201. sysctl_sched_granularity *= factor;
  4202. if (sysctl_sched_granularity > gran_limit)
  4203. sysctl_sched_granularity = gran_limit;
  4204. sysctl_sched_runtime_limit = sysctl_sched_granularity * 4;
  4205. sysctl_sched_wakeup_granularity = sysctl_sched_granularity / 2;
  4206. }
  4207. #ifdef CONFIG_SMP
/*
 * This is how migration works:
 *
 * 1) we queue a struct migration_req structure in the source CPU's
 *    runqueue and wake up that CPU's migration thread.
 * 2) we wait on the request's completion => we block.
 * 3) the migration thread wakes up (implicitly it forces the migrated
 *    thread off the CPU)
 * 4) it gets the migration request and checks whether the migrated
 *    task is still in the wrong runqueue.
 * 5) if it's in the wrong runqueue then the migration thread removes
 *    it and puts it into the right queue.
 * 6) the migration thread completes the request.
 * 7) we wake up and the migration is done.
 */
  4223. /*
  4224. * Change a given task's CPU affinity. Migrate the thread to a
  4225. * proper CPU and schedule it away if the CPU it's executing on
  4226. * is removed from the allowed bitmask.
  4227. *
  4228. * NOTE: the caller must have a valid reference to the task, the
  4229. * task must not exit() & deallocate itself prematurely. The
  4230. * call is not atomic; no spinlocks may be held.
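 *
 * Example (hypothetical caller, assuming CPU 2 is online):
 * set_cpus_allowed(p, cpumask_of_cpu(2)) restricts @p to CPU 2 and, if
 * @p currently sits on another runqueue, blocks until the migration
 * thread has moved it over. Returns 0 on success, or -EINVAL if the new
 * mask contains no online CPU.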
  4231. */
  4232. int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
  4233. {
  4234. struct migration_req req;
  4235. unsigned long flags;
  4236. struct rq *rq;
  4237. int ret = 0;
  4238. rq = task_rq_lock(p, &flags);
  4239. if (!cpus_intersects(new_mask, cpu_online_map)) {
  4240. ret = -EINVAL;
  4241. goto out;
  4242. }
  4243. p->cpus_allowed = new_mask;
  4244. /* Can the task run on the task's current CPU? If so, we're done */
  4245. if (cpu_isset(task_cpu(p), new_mask))
  4246. goto out;
  4247. if (migrate_task(p, any_online_cpu(new_mask), &req)) {
  4248. /* Need help from migration thread: drop lock and wait. */
  4249. task_rq_unlock(rq, &flags);
  4250. wake_up_process(rq->migration_thread);
  4251. wait_for_completion(&req.done);
  4252. tlb_migrate_finish(p->mm);
  4253. return 0;
  4254. }
  4255. out:
  4256. task_rq_unlock(rq, &flags);
  4257. return ret;
  4258. }
  4259. EXPORT_SYMBOL_GPL(set_cpus_allowed);
  4260. /*
  4261. * Move (not current) task off this cpu, onto dest cpu. We're doing
  4262. * this because either it can't run here any more (set_cpus_allowed()
  4263. * away from this CPU, or CPU going down), or because we're
  4264. * attempting to rebalance this task on exec (sched_exec).
  4265. *
  4266. * So we race with normal scheduler movements, but that's OK, as long
  4267. * as the task is no longer on this CPU.
  4268. *
  4269. * Returns non-zero if task was successfully migrated.
  4270. */
  4271. static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
  4272. {
  4273. struct rq *rq_dest, *rq_src;
  4274. int ret = 0, on_rq;
  4275. if (unlikely(cpu_is_offline(dest_cpu)))
  4276. return ret;
  4277. rq_src = cpu_rq(src_cpu);
  4278. rq_dest = cpu_rq(dest_cpu);
  4279. double_rq_lock(rq_src, rq_dest);
  4280. /* Already moved. */
  4281. if (task_cpu(p) != src_cpu)
  4282. goto out;
  4283. /* Affinity changed (again). */
  4284. if (!cpu_isset(dest_cpu, p->cpus_allowed))
  4285. goto out;
  4286. on_rq = p->se.on_rq;
  4287. if (on_rq)
  4288. deactivate_task(rq_src, p, 0);
  4289. set_task_cpu(p, dest_cpu);
  4290. if (on_rq) {
  4291. activate_task(rq_dest, p, 0);
  4292. check_preempt_curr(rq_dest, p);
  4293. }
  4294. ret = 1;
  4295. out:
  4296. double_rq_unlock(rq_src, rq_dest);
  4297. return ret;
  4298. }
  4299. /*
  4300. * migration_thread - this is a highprio system thread that performs
  4301. * thread migration by bumping thread off CPU then 'pushing' onto
  4302. * another runqueue.
  4303. */
  4304. static int migration_thread(void *data)
  4305. {
  4306. int cpu = (long)data;
  4307. struct rq *rq;
  4308. rq = cpu_rq(cpu);
  4309. BUG_ON(rq->migration_thread != current);
  4310. set_current_state(TASK_INTERRUPTIBLE);
  4311. while (!kthread_should_stop()) {
  4312. struct migration_req *req;
  4313. struct list_head *head;
  4314. spin_lock_irq(&rq->lock);
  4315. if (cpu_is_offline(cpu)) {
  4316. spin_unlock_irq(&rq->lock);
  4317. goto wait_to_die;
  4318. }
  4319. if (rq->active_balance) {
  4320. active_load_balance(rq, cpu);
  4321. rq->active_balance = 0;
  4322. }
  4323. head = &rq->migration_queue;
  4324. if (list_empty(head)) {
  4325. spin_unlock_irq(&rq->lock);
  4326. schedule();
  4327. set_current_state(TASK_INTERRUPTIBLE);
  4328. continue;
  4329. }
  4330. req = list_entry(head->next, struct migration_req, list);
  4331. list_del_init(head->next);
  4332. spin_unlock(&rq->lock);
  4333. __migrate_task(req->task, cpu, req->dest_cpu);
  4334. local_irq_enable();
  4335. complete(&req->done);
  4336. }
  4337. __set_current_state(TASK_RUNNING);
  4338. return 0;
  4339. wait_to_die:
  4340. /* Wait for kthread_stop */
  4341. set_current_state(TASK_INTERRUPTIBLE);
  4342. while (!kthread_should_stop()) {
  4343. schedule();
  4344. set_current_state(TASK_INTERRUPTIBLE);
  4345. }
  4346. __set_current_state(TASK_RUNNING);
  4347. return 0;
  4348. }
  4349. #ifdef CONFIG_HOTPLUG_CPU
  4350. /*
 * Figure out where a task on a dead CPU should go, use force if necessary.
  4352. * NOTE: interrupts should be disabled by the caller
  4353. */
  4354. static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
  4355. {
  4356. unsigned long flags;
  4357. cpumask_t mask;
  4358. struct rq *rq;
  4359. int dest_cpu;
  4360. restart:
  4361. /* On same node? */
  4362. mask = node_to_cpumask(cpu_to_node(dead_cpu));
  4363. cpus_and(mask, mask, p->cpus_allowed);
  4364. dest_cpu = any_online_cpu(mask);
  4365. /* On any allowed CPU? */
  4366. if (dest_cpu == NR_CPUS)
  4367. dest_cpu = any_online_cpu(p->cpus_allowed);
  4368. /* No more Mr. Nice Guy. */
  4369. if (dest_cpu == NR_CPUS) {
  4370. rq = task_rq_lock(p, &flags);
  4371. cpus_setall(p->cpus_allowed);
  4372. dest_cpu = any_online_cpu(p->cpus_allowed);
  4373. task_rq_unlock(rq, &flags);
  4374. /*
  4375. * Don't tell them about moving exiting tasks or
  4376. * kernel threads (both mm NULL), since they never
 * leave the kernel.
  4378. */
  4379. if (p->mm && printk_ratelimit())
  4380. printk(KERN_INFO "process %d (%s) no "
  4381. "longer affine to cpu%d\n",
  4382. p->pid, p->comm, dead_cpu);
  4383. }
  4384. if (!__migrate_task(p, dead_cpu, dest_cpu))
  4385. goto restart;
  4386. }
  4387. /*
  4388. * While a dead CPU has no uninterruptible tasks queued at this point,
  4389. * it might still have a nonzero ->nr_uninterruptible counter, because
 * for performance reasons the counter is not strictly tracking tasks to
  4391. * their home CPUs. So we just add the counter to another CPU's counter,
  4392. * to keep the global sum constant after CPU-down:
  4393. */
  4394. static void migrate_nr_uninterruptible(struct rq *rq_src)
  4395. {
  4396. struct rq *rq_dest = cpu_rq(any_online_cpu(CPU_MASK_ALL));
  4397. unsigned long flags;
  4398. local_irq_save(flags);
  4399. double_rq_lock(rq_src, rq_dest);
  4400. rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
  4401. rq_src->nr_uninterruptible = 0;
  4402. double_rq_unlock(rq_src, rq_dest);
  4403. local_irq_restore(flags);
  4404. }
  4405. /* Run through task list and migrate tasks from the dead cpu. */
  4406. static void migrate_live_tasks(int src_cpu)
  4407. {
  4408. struct task_struct *p, *t;
  4409. write_lock_irq(&tasklist_lock);
  4410. do_each_thread(t, p) {
  4411. if (p == current)
  4412. continue;
  4413. if (task_cpu(p) == src_cpu)
  4414. move_task_off_dead_cpu(src_cpu, p);
  4415. } while_each_thread(t, p);
  4416. write_unlock_irq(&tasklist_lock);
  4417. }
  4418. /*
  4419. * Schedules idle task to be the next runnable task on current CPU.
  4420. * It does so by boosting its priority to highest possible and adding it to
  4421. * the _front_ of the runqueue. Used by CPU offline code.
  4422. */
  4423. void sched_idle_next(void)
  4424. {
  4425. int this_cpu = smp_processor_id();
  4426. struct rq *rq = cpu_rq(this_cpu);
  4427. struct task_struct *p = rq->idle;
  4428. unsigned long flags;
  4429. /* cpu has to be offline */
  4430. BUG_ON(cpu_online(this_cpu));
  4431. /*
 * Strictly not necessary since the rest of the CPUs are stopped by now
  4433. * and interrupts disabled on the current cpu.
  4434. */
  4435. spin_lock_irqsave(&rq->lock, flags);
  4436. __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
  4437. /* Add idle task to the _front_ of its priority queue: */
  4438. activate_idle_task(p, rq);
  4439. spin_unlock_irqrestore(&rq->lock, flags);
  4440. }
  4441. /*
  4442. * Ensures that the idle task is using init_mm right before its cpu goes
  4443. * offline.
  4444. */
  4445. void idle_task_exit(void)
  4446. {
  4447. struct mm_struct *mm = current->active_mm;
  4448. BUG_ON(cpu_online(smp_processor_id()));
  4449. if (mm != &init_mm)
  4450. switch_mm(mm, &init_mm, current);
  4451. mmdrop(mm);
  4452. }
  4453. /* called under rq->lock with disabled interrupts */
  4454. static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
  4455. {
  4456. struct rq *rq = cpu_rq(dead_cpu);
  4457. /* Must be exiting, otherwise would be on tasklist. */
  4458. BUG_ON(p->exit_state != EXIT_ZOMBIE && p->exit_state != EXIT_DEAD);
  4459. /* Cannot have done final schedule yet: would have vanished. */
  4460. BUG_ON(p->state == TASK_DEAD);
  4461. get_task_struct(p);
  4462. /*
  4463. * Drop lock around migration; if someone else moves it,
  4464. * that's OK. No task can be added to this CPU, so iteration is
  4465. * fine.
  4466. * NOTE: interrupts should be left disabled --dev@
  4467. */
  4468. spin_unlock(&rq->lock);
  4469. move_task_off_dead_cpu(dead_cpu, p);
  4470. spin_lock(&rq->lock);
  4471. put_task_struct(p);
  4472. }
  4473. /* release_task() removes task from tasklist, so we won't find dead tasks. */
  4474. static void migrate_dead_tasks(unsigned int dead_cpu)
  4475. {
  4476. struct rq *rq = cpu_rq(dead_cpu);
  4477. struct task_struct *next;
  4478. for ( ; ; ) {
  4479. if (!rq->nr_running)
  4480. break;
  4481. next = pick_next_task(rq, rq->curr, rq_clock(rq));
  4482. if (!next)
  4483. break;
  4484. migrate_dead(dead_cpu, next);
  4485. }
  4486. }
  4487. #endif /* CONFIG_HOTPLUG_CPU */
  4488. /*
 * migration_call - callback that gets triggered when a CPU is added or removed.
 * Here we start up the migration thread for a new CPU, and tear it down again
 * when the CPU goes offline.
  4491. */
  4492. static int __cpuinit
  4493. migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  4494. {
  4495. struct task_struct *p;
  4496. int cpu = (long)hcpu;
  4497. unsigned long flags;
  4498. struct rq *rq;
  4499. switch (action) {
  4500. case CPU_LOCK_ACQUIRE:
  4501. mutex_lock(&sched_hotcpu_mutex);
  4502. break;
  4503. case CPU_UP_PREPARE:
  4504. case CPU_UP_PREPARE_FROZEN:
  4505. p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
  4506. if (IS_ERR(p))
  4507. return NOTIFY_BAD;
  4508. kthread_bind(p, cpu);
  4509. /* Must be high prio: stop_machine expects to yield to it. */
  4510. rq = task_rq_lock(p, &flags);
  4511. __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
  4512. task_rq_unlock(rq, &flags);
  4513. cpu_rq(cpu)->migration_thread = p;
  4514. break;
  4515. case CPU_ONLINE:
  4516. case CPU_ONLINE_FROZEN:
/* Strictly unnecessary, as the first user will wake it. */
  4518. wake_up_process(cpu_rq(cpu)->migration_thread);
  4519. break;
  4520. #ifdef CONFIG_HOTPLUG_CPU
  4521. case CPU_UP_CANCELED:
  4522. case CPU_UP_CANCELED_FROZEN:
  4523. if (!cpu_rq(cpu)->migration_thread)
  4524. break;
  4525. /* Unbind it from offline cpu so it can run. Fall thru. */
  4526. kthread_bind(cpu_rq(cpu)->migration_thread,
  4527. any_online_cpu(cpu_online_map));
  4528. kthread_stop(cpu_rq(cpu)->migration_thread);
  4529. cpu_rq(cpu)->migration_thread = NULL;
  4530. break;
  4531. case CPU_DEAD:
  4532. case CPU_DEAD_FROZEN:
  4533. migrate_live_tasks(cpu);
  4534. rq = cpu_rq(cpu);
  4535. kthread_stop(rq->migration_thread);
  4536. rq->migration_thread = NULL;
  4537. /* Idle task back to normal (off runqueue, low prio) */
  4538. rq = task_rq_lock(rq->idle, &flags);
  4539. deactivate_task(rq, rq->idle, 0);
  4540. rq->idle->static_prio = MAX_PRIO;
  4541. __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
  4542. rq->idle->sched_class = &idle_sched_class;
  4543. migrate_dead_tasks(cpu);
  4544. task_rq_unlock(rq, &flags);
  4545. migrate_nr_uninterruptible(rq);
  4546. BUG_ON(rq->nr_running != 0);
  4547. /* No need to migrate the tasks: it was best-effort if
  4548. * they didn't take sched_hotcpu_mutex. Just wake up
  4549. * the requestors. */
  4550. spin_lock_irq(&rq->lock);
  4551. while (!list_empty(&rq->migration_queue)) {
  4552. struct migration_req *req;
  4553. req = list_entry(rq->migration_queue.next,
  4554. struct migration_req, list);
  4555. list_del_init(&req->list);
  4556. complete(&req->done);
  4557. }
  4558. spin_unlock_irq(&rq->lock);
  4559. break;
  4560. #endif
  4561. case CPU_LOCK_RELEASE:
  4562. mutex_unlock(&sched_hotcpu_mutex);
  4563. break;
  4564. }
  4565. return NOTIFY_OK;
  4566. }
  4567. /* Register at highest priority so that task migration (migrate_all_tasks)
  4568. * happens before everything else.
  4569. */
  4570. static struct notifier_block __cpuinitdata migration_notifier = {
  4571. .notifier_call = migration_call,
  4572. .priority = 10
  4573. };
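/*
 * Create and start the migration thread for the boot CPU by hand, then
 * register the notifier so that every CPU brought up later gets one via
 * the hotplug path.
 */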
  4574. int __init migration_init(void)
  4575. {
  4576. void *cpu = (void *)(long)smp_processor_id();
  4577. int err;
  4578. /* Start one for the boot CPU: */
  4579. err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
  4580. BUG_ON(err == NOTIFY_BAD);
  4581. migration_call(&migration_notifier, CPU_ONLINE, cpu);
  4582. register_cpu_notifier(&migration_notifier);
  4583. return 0;
  4584. }
  4585. #endif
  4586. #ifdef CONFIG_SMP
  4587. /* Number of possible processor ids */
  4588. int nr_cpu_ids __read_mostly = NR_CPUS;
  4589. EXPORT_SYMBOL(nr_cpu_ids);
  4590. #undef SCHED_DOMAIN_DEBUG
  4591. #ifdef SCHED_DOMAIN_DEBUG
  4592. static void sched_domain_debug(struct sched_domain *sd, int cpu)
  4593. {
  4594. int level = 0;
  4595. if (!sd) {
  4596. printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
  4597. return;
  4598. }
  4599. printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
  4600. do {
  4601. int i;
  4602. char str[NR_CPUS];
  4603. struct sched_group *group = sd->groups;
  4604. cpumask_t groupmask;
  4605. cpumask_scnprintf(str, NR_CPUS, sd->span);
  4606. cpus_clear(groupmask);
  4607. printk(KERN_DEBUG);
  4608. for (i = 0; i < level + 1; i++)
  4609. printk(" ");
  4610. printk("domain %d: ", level);
  4611. if (!(sd->flags & SD_LOAD_BALANCE)) {
  4612. printk("does not load-balance\n");
  4613. if (sd->parent)
  4614. printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
  4615. " has parent");
  4616. break;
  4617. }
  4618. printk("span %s\n", str);
  4619. if (!cpu_isset(cpu, sd->span))
  4620. printk(KERN_ERR "ERROR: domain->span does not contain "
  4621. "CPU%d\n", cpu);
  4622. if (!cpu_isset(cpu, group->cpumask))
  4623. printk(KERN_ERR "ERROR: domain->groups does not contain"
  4624. " CPU%d\n", cpu);
  4625. printk(KERN_DEBUG);
  4626. for (i = 0; i < level + 2; i++)
  4627. printk(" ");
  4628. printk("groups:");
  4629. do {
  4630. if (!group) {
  4631. printk("\n");
  4632. printk(KERN_ERR "ERROR: group is NULL\n");
  4633. break;
  4634. }
  4635. if (!group->__cpu_power) {
  4636. printk("\n");
  4637. printk(KERN_ERR "ERROR: domain->cpu_power not "
  4638. "set\n");
  4639. }
  4640. if (!cpus_weight(group->cpumask)) {
  4641. printk("\n");
  4642. printk(KERN_ERR "ERROR: empty group\n");
  4643. }
  4644. if (cpus_intersects(groupmask, group->cpumask)) {
  4645. printk("\n");
  4646. printk(KERN_ERR "ERROR: repeated CPUs\n");
  4647. }
  4648. cpus_or(groupmask, groupmask, group->cpumask);
  4649. cpumask_scnprintf(str, NR_CPUS, group->cpumask);
  4650. printk(" %s", str);
  4651. group = group->next;
  4652. } while (group != sd->groups);
  4653. printk("\n");
  4654. if (!cpus_equal(sd->span, groupmask))
  4655. printk(KERN_ERR "ERROR: groups don't span "
  4656. "domain->span\n");
  4657. level++;
  4658. sd = sd->parent;
  4659. if (!sd)
  4660. continue;
  4661. if (!cpus_subset(groupmask, sd->span))
  4662. printk(KERN_ERR "ERROR: parent span is not a superset "
  4663. "of domain->span\n");
  4664. } while (sd);
  4665. }
  4666. #else
  4667. # define sched_domain_debug(sd, cpu) do { } while (0)
  4668. #endif
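/*
 * A sched domain is "degenerate" (and may be collapsed) when it cannot
 * influence scheduling: it spans a single CPU, or its balancing flags
 * have no effect (only one group) and it sets no wake flags.
 */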
  4669. static int sd_degenerate(struct sched_domain *sd)
  4670. {
  4671. if (cpus_weight(sd->span) == 1)
  4672. return 1;
  4673. /* Following flags need at least 2 groups */
  4674. if (sd->flags & (SD_LOAD_BALANCE |
  4675. SD_BALANCE_NEWIDLE |
  4676. SD_BALANCE_FORK |
  4677. SD_BALANCE_EXEC |
  4678. SD_SHARE_CPUPOWER |
  4679. SD_SHARE_PKG_RESOURCES)) {
  4680. if (sd->groups != sd->groups->next)
  4681. return 0;
  4682. }
  4683. /* Following flags don't use groups */
  4684. if (sd->flags & (SD_WAKE_IDLE |
  4685. SD_WAKE_AFFINE |
  4686. SD_WAKE_BALANCE))
  4687. return 0;
  4688. return 1;
  4689. }
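/*
 * Can @parent be folded into @sd? Yes if the parent is itself degenerate,
 * or if it covers the same span as the child and adds no flags beyond
 * those the child already has.
 */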
  4690. static int
  4691. sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
  4692. {
  4693. unsigned long cflags = sd->flags, pflags = parent->flags;
  4694. if (sd_degenerate(parent))
  4695. return 1;
  4696. if (!cpus_equal(sd->span, parent->span))
  4697. return 0;
  4698. /* Does parent contain flags not in child? */
  4699. /* WAKE_BALANCE is a subset of WAKE_AFFINE */
  4700. if (cflags & SD_WAKE_AFFINE)
  4701. pflags &= ~SD_WAKE_BALANCE;
  4702. /* Flags needing groups don't count if only 1 group in parent */
  4703. if (parent->groups == parent->groups->next) {
  4704. pflags &= ~(SD_LOAD_BALANCE |
  4705. SD_BALANCE_NEWIDLE |
  4706. SD_BALANCE_FORK |
  4707. SD_BALANCE_EXEC |
  4708. SD_SHARE_CPUPOWER |
  4709. SD_SHARE_PKG_RESOURCES);
  4710. }
  4711. if (~cflags & pflags)
  4712. return 0;
  4713. return 1;
  4714. }
  4715. /*
  4716. * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  4717. * hold the hotplug lock.
  4718. */
  4719. static void cpu_attach_domain(struct sched_domain *sd, int cpu)
  4720. {
  4721. struct rq *rq = cpu_rq(cpu);
  4722. struct sched_domain *tmp;
  4723. /* Remove the sched domains which do not contribute to scheduling. */
  4724. for (tmp = sd; tmp; tmp = tmp->parent) {
  4725. struct sched_domain *parent = tmp->parent;
  4726. if (!parent)
  4727. break;
  4728. if (sd_parent_degenerate(tmp, parent)) {
  4729. tmp->parent = parent->parent;
  4730. if (parent->parent)
  4731. parent->parent->child = tmp;
  4732. }
  4733. }
  4734. if (sd && sd_degenerate(sd)) {
  4735. sd = sd->parent;
  4736. if (sd)
  4737. sd->child = NULL;
  4738. }
  4739. sched_domain_debug(sd, cpu);
  4740. rcu_assign_pointer(rq->sd, sd);
  4741. }
  4742. /* cpus with isolated domains */
  4743. static cpumask_t cpu_isolated_map = CPU_MASK_NONE;
  4744. /* Setup the mask of cpus configured for isolated domains */
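/*
 * For example, booting with "isolcpus=2,3" keeps CPUs 2 and 3 out of the
 * default sched domains: they are removed from the map handed to
 * build_sched_domains() in arch_init_sched_domains() below.
 */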
  4745. static int __init isolated_cpu_setup(char *str)
  4746. {
  4747. int ints[NR_CPUS], i;
  4748. str = get_options(str, ARRAY_SIZE(ints), ints);
  4749. cpus_clear(cpu_isolated_map);
  4750. for (i = 1; i <= ints[0]; i++)
  4751. if (ints[i] < NR_CPUS)
  4752. cpu_set(ints[i], cpu_isolated_map);
  4753. return 1;
  4754. }
  4755. __setup ("isolcpus=", isolated_cpu_setup);
  4756. /*
  4757. * init_sched_build_groups takes the cpumask we wish to span, and a pointer
 * to a function which identifies what group (along with the sched group) a
 * CPU belongs to. The return value of group_fn must be >= 0 and < NR_CPUS
  4760. * (due to the fact that we keep track of groups covered with a cpumask_t).
  4761. *
  4762. * init_sched_build_groups will build a circular linked list of the groups
  4763. * covered by the given span, and will set each group's ->cpumask correctly,
  4764. * and ->cpu_power to 0.
  4765. */
  4766. static void
  4767. init_sched_build_groups(cpumask_t span, const cpumask_t *cpu_map,
  4768. int (*group_fn)(int cpu, const cpumask_t *cpu_map,
  4769. struct sched_group **sg))
  4770. {
  4771. struct sched_group *first = NULL, *last = NULL;
  4772. cpumask_t covered = CPU_MASK_NONE;
  4773. int i;
  4774. for_each_cpu_mask(i, span) {
  4775. struct sched_group *sg;
  4776. int group = group_fn(i, cpu_map, &sg);
  4777. int j;
  4778. if (cpu_isset(i, covered))
  4779. continue;
  4780. sg->cpumask = CPU_MASK_NONE;
  4781. sg->__cpu_power = 0;
  4782. for_each_cpu_mask(j, span) {
  4783. if (group_fn(j, cpu_map, NULL) != group)
  4784. continue;
  4785. cpu_set(j, covered);
  4786. cpu_set(j, sg->cpumask);
  4787. }
  4788. if (!first)
  4789. first = sg;
  4790. if (last)
  4791. last->next = sg;
  4792. last = sg;
  4793. }
  4794. last->next = first;
  4795. }
  4796. #define SD_NODES_PER_DOMAIN 16
  4797. #ifdef CONFIG_NUMA
  4798. /**
  4799. * find_next_best_node - find the next node to include in a sched_domain
  4800. * @node: node whose sched_domain we're building
  4801. * @used_nodes: nodes already in the sched_domain
  4802. *
  4803. * Find the next node to include in a given scheduling domain. Simply
  4804. * finds the closest node not already in the @used_nodes map.
  4805. *
  4806. * Should use nodemask_t.
  4807. */
  4808. static int find_next_best_node(int node, unsigned long *used_nodes)
  4809. {
  4810. int i, n, val, min_val, best_node = 0;
  4811. min_val = INT_MAX;
  4812. for (i = 0; i < MAX_NUMNODES; i++) {
  4813. /* Start at @node */
  4814. n = (node + i) % MAX_NUMNODES;
  4815. if (!nr_cpus_node(n))
  4816. continue;
  4817. /* Skip already used nodes */
  4818. if (test_bit(n, used_nodes))
  4819. continue;
  4820. /* Simple min distance search */
  4821. val = node_distance(node, n);
  4822. if (val < min_val) {
  4823. min_val = val;
  4824. best_node = n;
  4825. }
  4826. }
  4827. set_bit(best_node, used_nodes);
  4828. return best_node;
  4829. }
  4830. /**
  4831. * sched_domain_node_span - get a cpumask for a node's sched_domain
  4832. * @node: node whose cpumask we're constructing
  4834. *
  4835. * Given a node, construct a good cpumask for its sched_domain to span. It
  4836. * should be one that prevents unnecessary balancing, but also spreads tasks
  4837. * out optimally.
  4838. */
  4839. static cpumask_t sched_domain_node_span(int node)
  4840. {
  4841. DECLARE_BITMAP(used_nodes, MAX_NUMNODES);
  4842. cpumask_t span, nodemask;
  4843. int i;
  4844. cpus_clear(span);
  4845. bitmap_zero(used_nodes, MAX_NUMNODES);
  4846. nodemask = node_to_cpumask(node);
  4847. cpus_or(span, span, nodemask);
  4848. set_bit(node, used_nodes);
  4849. for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
  4850. int next_node = find_next_best_node(node, used_nodes);
  4851. nodemask = node_to_cpumask(next_node);
  4852. cpus_or(span, span, nodemask);
  4853. }
  4854. return span;
  4855. }
  4856. #endif
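/*
 * 0 means favour performance, 1 means favour power savings. These are
 * flipped from user space through the sysfs attributes created in
 * sched_create_sysfs_power_savings_entries() further down.
 */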
  4857. int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
  4858. /*
  4859. * SMT sched-domains:
  4860. */
  4861. #ifdef CONFIG_SCHED_SMT
  4862. static DEFINE_PER_CPU(struct sched_domain, cpu_domains);
  4863. static DEFINE_PER_CPU(struct sched_group, sched_group_cpus);
  4864. static int cpu_to_cpu_group(int cpu, const cpumask_t *cpu_map,
  4865. struct sched_group **sg)
  4866. {
  4867. if (sg)
  4868. *sg = &per_cpu(sched_group_cpus, cpu);
  4869. return cpu;
  4870. }
  4871. #endif
  4872. /*
  4873. * multi-core sched-domains:
  4874. */
  4875. #ifdef CONFIG_SCHED_MC
  4876. static DEFINE_PER_CPU(struct sched_domain, core_domains);
  4877. static DEFINE_PER_CPU(struct sched_group, sched_group_core);
  4878. #endif
  4879. #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
  4880. static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
  4881. struct sched_group **sg)
  4882. {
  4883. int group;
  4884. cpumask_t mask = cpu_sibling_map[cpu];
  4885. cpus_and(mask, mask, *cpu_map);
  4886. group = first_cpu(mask);
  4887. if (sg)
  4888. *sg = &per_cpu(sched_group_core, group);
  4889. return group;
  4890. }
  4891. #elif defined(CONFIG_SCHED_MC)
  4892. static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
  4893. struct sched_group **sg)
  4894. {
  4895. if (sg)
  4896. *sg = &per_cpu(sched_group_core, cpu);
  4897. return cpu;
  4898. }
  4899. #endif
  4900. static DEFINE_PER_CPU(struct sched_domain, phys_domains);
  4901. static DEFINE_PER_CPU(struct sched_group, sched_group_phys);
  4902. static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
  4903. struct sched_group **sg)
  4904. {
  4905. int group;
  4906. #ifdef CONFIG_SCHED_MC
  4907. cpumask_t mask = cpu_coregroup_map(cpu);
  4908. cpus_and(mask, mask, *cpu_map);
  4909. group = first_cpu(mask);
  4910. #elif defined(CONFIG_SCHED_SMT)
  4911. cpumask_t mask = cpu_sibling_map[cpu];
  4912. cpus_and(mask, mask, *cpu_map);
  4913. group = first_cpu(mask);
  4914. #else
  4915. group = cpu;
  4916. #endif
  4917. if (sg)
  4918. *sg = &per_cpu(sched_group_phys, group);
  4919. return group;
  4920. }
  4921. #ifdef CONFIG_NUMA
  4922. /*
  4923. * The init_sched_build_groups can't handle what we want to do with node
  4924. * groups, so roll our own. Now each node has its own list of groups which
  4925. * gets dynamically allocated.
  4926. */
  4927. static DEFINE_PER_CPU(struct sched_domain, node_domains);
  4928. static struct sched_group **sched_group_nodes_bycpu[NR_CPUS];
  4929. static DEFINE_PER_CPU(struct sched_domain, allnodes_domains);
  4930. static DEFINE_PER_CPU(struct sched_group, sched_group_allnodes);
  4931. static int cpu_to_allnodes_group(int cpu, const cpumask_t *cpu_map,
  4932. struct sched_group **sg)
  4933. {
  4934. cpumask_t nodemask = node_to_cpumask(cpu_to_node(cpu));
  4935. int group;
  4936. cpus_and(nodemask, nodemask, *cpu_map);
  4937. group = first_cpu(nodemask);
  4938. if (sg)
  4939. *sg = &per_cpu(sched_group_allnodes, group);
  4940. return group;
  4941. }
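/*
 * Walk a node's circular list of sched groups and add to each group the
 * __cpu_power of every physical package it spans, counting each package
 * only once (via its first CPU).
 */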
  4942. static void init_numa_sched_groups_power(struct sched_group *group_head)
  4943. {
  4944. struct sched_group *sg = group_head;
  4945. int j;
  4946. if (!sg)
  4947. return;
  4948. next_sg:
  4949. for_each_cpu_mask(j, sg->cpumask) {
  4950. struct sched_domain *sd;
  4951. sd = &per_cpu(phys_domains, j);
  4952. if (j != first_cpu(sd->groups->cpumask)) {
  4953. /*
  4954. * Only add "power" once for each
  4955. * physical package.
  4956. */
  4957. continue;
  4958. }
  4959. sg_inc_cpu_power(sg, sd->groups->__cpu_power);
  4960. }
  4961. sg = sg->next;
  4962. if (sg != group_head)
  4963. goto next_sg;
  4964. }
  4965. #endif
  4966. #ifdef CONFIG_NUMA
  4967. /* Free memory allocated for various sched_group structures */
  4968. static void free_sched_groups(const cpumask_t *cpu_map)
  4969. {
  4970. int cpu, i;
  4971. for_each_cpu_mask(cpu, *cpu_map) {
  4972. struct sched_group **sched_group_nodes
  4973. = sched_group_nodes_bycpu[cpu];
  4974. if (!sched_group_nodes)
  4975. continue;
  4976. for (i = 0; i < MAX_NUMNODES; i++) {
  4977. cpumask_t nodemask = node_to_cpumask(i);
  4978. struct sched_group *oldsg, *sg = sched_group_nodes[i];
  4979. cpus_and(nodemask, nodemask, *cpu_map);
  4980. if (cpus_empty(nodemask))
  4981. continue;
  4982. if (sg == NULL)
  4983. continue;
  4984. sg = sg->next;
  4985. next_sg:
  4986. oldsg = sg;
  4987. sg = sg->next;
  4988. kfree(oldsg);
  4989. if (oldsg != sched_group_nodes[i])
  4990. goto next_sg;
  4991. }
  4992. kfree(sched_group_nodes);
  4993. sched_group_nodes_bycpu[cpu] = NULL;
  4994. }
  4995. }
  4996. #else
  4997. static void free_sched_groups(const cpumask_t *cpu_map)
  4998. {
  4999. }
  5000. #endif
  5001. /*
  5002. * Initialize sched groups cpu_power.
  5003. *
  5004. * cpu_power indicates the capacity of sched group, which is used while
  5005. * distributing the load between different sched groups in a sched domain.
  5006. * Typically cpu_power for all the groups in a sched domain will be same unless
  5007. * there are asymmetries in the topology. If there are asymmetries, group
  5008. * having more cpu_power will pickup more load compared to the group having
  5009. * less cpu_power.
  5010. *
  5011. * cpu_power will be a multiple of SCHED_LOAD_SCALE. This multiple represents
  5012. * the maximum number of tasks a group can handle in the presence of other idle
  5013. * or lightly loaded groups in the same sched domain.
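 *
 * For example, under the default performance policy a domain whose child
 * groups share cpu power or package resources (SMT siblings, cores sharing
 * a cache) gets exactly SCHED_LOAD_SCALE per group, while higher-level
 * groups get the sum of their child groups' cpu_power.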
  5014. */
  5015. static void init_sched_groups_power(int cpu, struct sched_domain *sd)
  5016. {
  5017. struct sched_domain *child;
  5018. struct sched_group *group;
  5019. WARN_ON(!sd || !sd->groups);
  5020. if (cpu != first_cpu(sd->groups->cpumask))
  5021. return;
  5022. child = sd->child;
  5023. sd->groups->__cpu_power = 0;
  5024. /*
  5025. * For perf policy, if the groups in child domain share resources
  5026. * (for example cores sharing some portions of the cache hierarchy
  5027. * or SMT), then set this domain groups cpu_power such that each group
  5028. * can handle only one task, when there are other idle groups in the
  5029. * same sched domain.
  5030. */
  5031. if (!child || (!(sd->flags & SD_POWERSAVINGS_BALANCE) &&
  5032. (child->flags &
  5033. (SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES)))) {
  5034. sg_inc_cpu_power(sd->groups, SCHED_LOAD_SCALE);
  5035. return;
  5036. }
  5037. /*
  5038. * add cpu_power of each child group to this groups cpu_power
  5039. */
  5040. group = child->groups;
  5041. do {
  5042. sg_inc_cpu_power(sd->groups, group->__cpu_power);
  5043. group = group->next;
  5044. } while (group != child->groups);
  5045. }
  5046. /*
  5047. * Build sched domains for a given set of cpus and attach the sched domains
  5048. * to the individual cpus
  5049. */
  5050. static int build_sched_domains(const cpumask_t *cpu_map)
  5051. {
  5052. int i;
  5053. #ifdef CONFIG_NUMA
  5054. struct sched_group **sched_group_nodes = NULL;
  5055. int sd_allnodes = 0;
  5056. /*
  5057. * Allocate the per-node list of sched groups
  5058. */
  5059. sched_group_nodes = kzalloc(sizeof(struct sched_group *)*MAX_NUMNODES,
  5060. GFP_KERNEL);
  5061. if (!sched_group_nodes) {
  5062. printk(KERN_WARNING "Can not alloc sched group node list\n");
  5063. return -ENOMEM;
  5064. }
  5065. sched_group_nodes_bycpu[first_cpu(*cpu_map)] = sched_group_nodes;
  5066. #endif
  5067. /*
  5068. * Set up domains for cpus specified by the cpu_map.
  5069. */
  5070. for_each_cpu_mask(i, *cpu_map) {
  5071. struct sched_domain *sd = NULL, *p;
  5072. cpumask_t nodemask = node_to_cpumask(cpu_to_node(i));
  5073. cpus_and(nodemask, nodemask, *cpu_map);
  5074. #ifdef CONFIG_NUMA
  5075. if (cpus_weight(*cpu_map) >
  5076. SD_NODES_PER_DOMAIN*cpus_weight(nodemask)) {
  5077. sd = &per_cpu(allnodes_domains, i);
  5078. *sd = SD_ALLNODES_INIT;
  5079. sd->span = *cpu_map;
  5080. cpu_to_allnodes_group(i, cpu_map, &sd->groups);
  5081. p = sd;
  5082. sd_allnodes = 1;
  5083. } else
  5084. p = NULL;
  5085. sd = &per_cpu(node_domains, i);
  5086. *sd = SD_NODE_INIT;
  5087. sd->span = sched_domain_node_span(cpu_to_node(i));
  5088. sd->parent = p;
  5089. if (p)
  5090. p->child = sd;
  5091. cpus_and(sd->span, sd->span, *cpu_map);
  5092. #endif
  5093. p = sd;
  5094. sd = &per_cpu(phys_domains, i);
  5095. *sd = SD_CPU_INIT;
  5096. sd->span = nodemask;
  5097. sd->parent = p;
  5098. if (p)
  5099. p->child = sd;
  5100. cpu_to_phys_group(i, cpu_map, &sd->groups);
  5101. #ifdef CONFIG_SCHED_MC
  5102. p = sd;
  5103. sd = &per_cpu(core_domains, i);
  5104. *sd = SD_MC_INIT;
  5105. sd->span = cpu_coregroup_map(i);
  5106. cpus_and(sd->span, sd->span, *cpu_map);
  5107. sd->parent = p;
  5108. p->child = sd;
  5109. cpu_to_core_group(i, cpu_map, &sd->groups);
  5110. #endif
  5111. #ifdef CONFIG_SCHED_SMT
  5112. p = sd;
  5113. sd = &per_cpu(cpu_domains, i);
  5114. *sd = SD_SIBLING_INIT;
  5115. sd->span = cpu_sibling_map[i];
  5116. cpus_and(sd->span, sd->span, *cpu_map);
  5117. sd->parent = p;
  5118. p->child = sd;
  5119. cpu_to_cpu_group(i, cpu_map, &sd->groups);
  5120. #endif
  5121. }
  5122. #ifdef CONFIG_SCHED_SMT
  5123. /* Set up CPU (sibling) groups */
  5124. for_each_cpu_mask(i, *cpu_map) {
  5125. cpumask_t this_sibling_map = cpu_sibling_map[i];
  5126. cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
  5127. if (i != first_cpu(this_sibling_map))
  5128. continue;
  5129. init_sched_build_groups(this_sibling_map, cpu_map,
  5130. &cpu_to_cpu_group);
  5131. }
  5132. #endif
  5133. #ifdef CONFIG_SCHED_MC
  5134. /* Set up multi-core groups */
  5135. for_each_cpu_mask(i, *cpu_map) {
  5136. cpumask_t this_core_map = cpu_coregroup_map(i);
  5137. cpus_and(this_core_map, this_core_map, *cpu_map);
  5138. if (i != first_cpu(this_core_map))
  5139. continue;
  5140. init_sched_build_groups(this_core_map, cpu_map,
  5141. &cpu_to_core_group);
  5142. }
  5143. #endif
  5144. /* Set up physical groups */
  5145. for (i = 0; i < MAX_NUMNODES; i++) {
  5146. cpumask_t nodemask = node_to_cpumask(i);
  5147. cpus_and(nodemask, nodemask, *cpu_map);
  5148. if (cpus_empty(nodemask))
  5149. continue;
  5150. init_sched_build_groups(nodemask, cpu_map, &cpu_to_phys_group);
  5151. }
  5152. #ifdef CONFIG_NUMA
  5153. /* Set up node groups */
  5154. if (sd_allnodes)
  5155. init_sched_build_groups(*cpu_map, cpu_map,
  5156. &cpu_to_allnodes_group);
  5157. for (i = 0; i < MAX_NUMNODES; i++) {
  5158. /* Set up node groups */
  5159. struct sched_group *sg, *prev;
  5160. cpumask_t nodemask = node_to_cpumask(i);
  5161. cpumask_t domainspan;
  5162. cpumask_t covered = CPU_MASK_NONE;
  5163. int j;
  5164. cpus_and(nodemask, nodemask, *cpu_map);
  5165. if (cpus_empty(nodemask)) {
  5166. sched_group_nodes[i] = NULL;
  5167. continue;
  5168. }
  5169. domainspan = sched_domain_node_span(i);
  5170. cpus_and(domainspan, domainspan, *cpu_map);
  5171. sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);
  5172. if (!sg) {
  5173. printk(KERN_WARNING "Can not alloc domain group for "
  5174. "node %d\n", i);
  5175. goto error;
  5176. }
  5177. sched_group_nodes[i] = sg;
  5178. for_each_cpu_mask(j, nodemask) {
  5179. struct sched_domain *sd;
  5180. sd = &per_cpu(node_domains, j);
  5181. sd->groups = sg;
  5182. }
  5183. sg->__cpu_power = 0;
  5184. sg->cpumask = nodemask;
  5185. sg->next = sg;
  5186. cpus_or(covered, covered, nodemask);
  5187. prev = sg;
  5188. for (j = 0; j < MAX_NUMNODES; j++) {
  5189. cpumask_t tmp, notcovered;
  5190. int n = (i + j) % MAX_NUMNODES;
  5191. cpus_complement(notcovered, covered);
  5192. cpus_and(tmp, notcovered, *cpu_map);
  5193. cpus_and(tmp, tmp, domainspan);
  5194. if (cpus_empty(tmp))
  5195. break;
  5196. nodemask = node_to_cpumask(n);
  5197. cpus_and(tmp, tmp, nodemask);
  5198. if (cpus_empty(tmp))
  5199. continue;
  5200. sg = kmalloc_node(sizeof(struct sched_group),
  5201. GFP_KERNEL, i);
  5202. if (!sg) {
  5203. printk(KERN_WARNING
  5204. "Can not alloc domain group for node %d\n", j);
  5205. goto error;
  5206. }
  5207. sg->__cpu_power = 0;
  5208. sg->cpumask = tmp;
  5209. sg->next = prev->next;
  5210. cpus_or(covered, covered, tmp);
  5211. prev->next = sg;
  5212. prev = sg;
  5213. }
  5214. }
  5215. #endif
  5216. /* Calculate CPU power for physical packages and nodes */
  5217. #ifdef CONFIG_SCHED_SMT
  5218. for_each_cpu_mask(i, *cpu_map) {
  5219. struct sched_domain *sd = &per_cpu(cpu_domains, i);
  5220. init_sched_groups_power(i, sd);
  5221. }
  5222. #endif
  5223. #ifdef CONFIG_SCHED_MC
  5224. for_each_cpu_mask(i, *cpu_map) {
  5225. struct sched_domain *sd = &per_cpu(core_domains, i);
  5226. init_sched_groups_power(i, sd);
  5227. }
  5228. #endif
  5229. for_each_cpu_mask(i, *cpu_map) {
  5230. struct sched_domain *sd = &per_cpu(phys_domains, i);
  5231. init_sched_groups_power(i, sd);
  5232. }
  5233. #ifdef CONFIG_NUMA
  5234. for (i = 0; i < MAX_NUMNODES; i++)
  5235. init_numa_sched_groups_power(sched_group_nodes[i]);
  5236. if (sd_allnodes) {
  5237. struct sched_group *sg;
  5238. cpu_to_allnodes_group(first_cpu(*cpu_map), cpu_map, &sg);
  5239. init_numa_sched_groups_power(sg);
  5240. }
  5241. #endif
  5242. /* Attach the domains */
  5243. for_each_cpu_mask(i, *cpu_map) {
  5244. struct sched_domain *sd;
  5245. #ifdef CONFIG_SCHED_SMT
  5246. sd = &per_cpu(cpu_domains, i);
  5247. #elif defined(CONFIG_SCHED_MC)
  5248. sd = &per_cpu(core_domains, i);
  5249. #else
  5250. sd = &per_cpu(phys_domains, i);
  5251. #endif
  5252. cpu_attach_domain(sd, i);
  5253. }
  5254. return 0;
  5255. #ifdef CONFIG_NUMA
  5256. error:
  5257. free_sched_groups(cpu_map);
  5258. return -ENOMEM;
  5259. #endif
  5260. }
  5261. /*
  5262. * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  5263. */
  5264. static int arch_init_sched_domains(const cpumask_t *cpu_map)
  5265. {
  5266. cpumask_t cpu_default_map;
  5267. int err;
  5268. /*
  5269. * Setup mask for cpus without special case scheduling requirements.
  5270. * For now this just excludes isolated cpus, but could be used to
  5271. * exclude other special cases in the future.
  5272. */
  5273. cpus_andnot(cpu_default_map, *cpu_map, cpu_isolated_map);
  5274. err = build_sched_domains(&cpu_default_map);
  5275. return err;
  5276. }
  5277. static void arch_destroy_sched_domains(const cpumask_t *cpu_map)
  5278. {
  5279. free_sched_groups(cpu_map);
  5280. }
  5281. /*
  5282. * Detach sched domains from a group of cpus specified in cpu_map
  5283. * These cpus will now be attached to the NULL domain
  5284. */
  5285. static void detach_destroy_domains(const cpumask_t *cpu_map)
  5286. {
  5287. int i;
  5288. for_each_cpu_mask(i, *cpu_map)
  5289. cpu_attach_domain(NULL, i);
  5290. synchronize_sched();
  5291. arch_destroy_sched_domains(cpu_map);
  5292. }
  5293. /*
  5294. * Partition sched domains as specified by the cpumasks below.
  5295. * This attaches all cpus from the cpumasks to the NULL domain,
 * waits for an RCU quiescent period, recalculates sched
 * domain information and then attaches them back to the
 * correct sched domains.
 * Call with the hotplug lock held.
  5300. */
  5301. int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
  5302. {
  5303. cpumask_t change_map;
  5304. int err = 0;
  5305. cpus_and(*partition1, *partition1, cpu_online_map);
  5306. cpus_and(*partition2, *partition2, cpu_online_map);
  5307. cpus_or(change_map, *partition1, *partition2);
  5308. /* Detach sched domains from all of the affected cpus */
  5309. detach_destroy_domains(&change_map);
  5310. if (!cpus_empty(*partition1))
  5311. err = build_sched_domains(partition1);
  5312. if (!err && !cpus_empty(*partition2))
  5313. err = build_sched_domains(partition2);
  5314. return err;
  5315. }
  5316. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
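/*
 * Tear down and rebuild the sched domains for all online CPUs; used when
 * the SMT/MC power-savings policy is changed via the sysfs files below.
 */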
  5317. int arch_reinit_sched_domains(void)
  5318. {
  5319. int err;
  5320. mutex_lock(&sched_hotcpu_mutex);
  5321. detach_destroy_domains(&cpu_online_map);
  5322. err = arch_init_sched_domains(&cpu_online_map);
  5323. mutex_unlock(&sched_hotcpu_mutex);
  5324. return err;
  5325. }
  5326. static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
  5327. {
  5328. int ret;
  5329. if (buf[0] != '0' && buf[0] != '1')
  5330. return -EINVAL;
  5331. if (smt)
  5332. sched_smt_power_savings = (buf[0] == '1');
  5333. else
  5334. sched_mc_power_savings = (buf[0] == '1');
  5335. ret = arch_reinit_sched_domains();
  5336. return ret ? ret : count;
  5337. }
  5338. int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
  5339. {
  5340. int err = 0;
  5341. #ifdef CONFIG_SCHED_SMT
  5342. if (smt_capable())
  5343. err = sysfs_create_file(&cls->kset.kobj,
  5344. &attr_sched_smt_power_savings.attr);
  5345. #endif
  5346. #ifdef CONFIG_SCHED_MC
  5347. if (!err && mc_capable())
  5348. err = sysfs_create_file(&cls->kset.kobj,
  5349. &attr_sched_mc_power_savings.attr);
  5350. #endif
  5351. return err;
  5352. }
  5353. #endif
  5354. #ifdef CONFIG_SCHED_MC
  5355. static ssize_t sched_mc_power_savings_show(struct sys_device *dev, char *page)
  5356. {
  5357. return sprintf(page, "%u\n", sched_mc_power_savings);
  5358. }
  5359. static ssize_t sched_mc_power_savings_store(struct sys_device *dev,
  5360. const char *buf, size_t count)
  5361. {
  5362. return sched_power_savings_store(buf, count, 0);
  5363. }
  5364. SYSDEV_ATTR(sched_mc_power_savings, 0644, sched_mc_power_savings_show,
  5365. sched_mc_power_savings_store);
  5366. #endif
  5367. #ifdef CONFIG_SCHED_SMT
  5368. static ssize_t sched_smt_power_savings_show(struct sys_device *dev, char *page)
  5369. {
  5370. return sprintf(page, "%u\n", sched_smt_power_savings);
  5371. }
  5372. static ssize_t sched_smt_power_savings_store(struct sys_device *dev,
  5373. const char *buf, size_t count)
  5374. {
  5375. return sched_power_savings_store(buf, count, 1);
  5376. }
  5377. SYSDEV_ATTR(sched_smt_power_savings, 0644, sched_smt_power_savings_show,
  5378. sched_smt_power_savings_store);
  5379. #endif
  5380. /*
  5381. * Force a reinitialization of the sched domains hierarchy. The domains
  5382. * and groups cannot be updated in place without racing with the balancing
  5383. * code, so we temporarily attach all running cpus to the NULL domain
  5384. * which will prevent rebalancing while the sched domains are recalculated.
  5385. */
  5386. static int update_sched_domains(struct notifier_block *nfb,
  5387. unsigned long action, void *hcpu)
  5388. {
  5389. switch (action) {
  5390. case CPU_UP_PREPARE:
  5391. case CPU_UP_PREPARE_FROZEN:
  5392. case CPU_DOWN_PREPARE:
  5393. case CPU_DOWN_PREPARE_FROZEN:
  5394. detach_destroy_domains(&cpu_online_map);
  5395. return NOTIFY_OK;
  5396. case CPU_UP_CANCELED:
  5397. case CPU_UP_CANCELED_FROZEN:
  5398. case CPU_DOWN_FAILED:
  5399. case CPU_DOWN_FAILED_FROZEN:
  5400. case CPU_ONLINE:
  5401. case CPU_ONLINE_FROZEN:
  5402. case CPU_DEAD:
  5403. case CPU_DEAD_FROZEN:
  5404. /*
  5405. * Fall through and re-initialise the domains.
  5406. */
  5407. break;
  5408. default:
  5409. return NOTIFY_DONE;
  5410. }
  5411. /* The hotplug lock is already held by cpu_up/cpu_down */
  5412. arch_init_sched_domains(&cpu_online_map);
  5413. return NOTIFY_OK;
  5414. }
  5415. void __init sched_init_smp(void)
  5416. {
  5417. cpumask_t non_isolated_cpus;
  5418. mutex_lock(&sched_hotcpu_mutex);
  5419. arch_init_sched_domains(&cpu_online_map);
  5420. cpus_andnot(non_isolated_cpus, cpu_possible_map, cpu_isolated_map);
  5421. if (cpus_empty(non_isolated_cpus))
  5422. cpu_set(smp_processor_id(), non_isolated_cpus);
  5423. mutex_unlock(&sched_hotcpu_mutex);
  5424. /* XXX: Theoretical race here - CPU may be hotplugged now */
  5425. hotcpu_notifier(update_sched_domains, 0);
  5426. /* Move init over to a non-isolated CPU */
  5427. if (set_cpus_allowed(current, non_isolated_cpus) < 0)
  5428. BUG();
  5429. sched_init_granularity();
  5430. }
  5431. #else
  5432. void __init sched_init_smp(void)
  5433. {
  5434. sched_init_granularity();
  5435. }
  5436. #endif /* CONFIG_SMP */
  5437. int in_sched_functions(unsigned long addr)
  5438. {
  5439. /* Linker adds these: start and end of __sched functions */
  5440. extern char __sched_text_start[], __sched_text_end[];
  5441. return in_lock_functions(addr) ||
  5442. (addr >= (unsigned long)__sched_text_start
  5443. && addr < (unsigned long)__sched_text_end);
  5444. }
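/* Set up a runqueue's CFS state: an empty timeline rbtree and the initial fair clock. */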
  5445. static inline void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
  5446. {
  5447. cfs_rq->tasks_timeline = RB_ROOT;
  5448. cfs_rq->fair_clock = 1;
  5449. #ifdef CONFIG_FAIR_GROUP_SCHED
  5450. cfs_rq->rq = rq;
  5451. #endif
  5452. }
  5453. void __init sched_init(void)
  5454. {
  5455. u64 now = sched_clock();
  5456. int highest_cpu = 0;
  5457. int i, j;
  5458. /*
  5459. * Link up the scheduling class hierarchy:
  5460. */
  5461. rt_sched_class.next = &fair_sched_class;
  5462. fair_sched_class.next = &idle_sched_class;
  5463. idle_sched_class.next = NULL;
  5464. for_each_possible_cpu(i) {
  5465. struct rt_prio_array *array;
  5466. struct rq *rq;
  5467. rq = cpu_rq(i);
  5468. spin_lock_init(&rq->lock);
  5469. lockdep_set_class(&rq->lock, &rq->rq_lock_key);
  5470. rq->nr_running = 0;
  5471. rq->clock = 1;
  5472. init_cfs_rq(&rq->cfs, rq);
  5473. #ifdef CONFIG_FAIR_GROUP_SCHED
  5474. INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
  5475. list_add(&rq->cfs.leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
  5476. #endif
  5477. rq->ls.load_update_last = now;
  5478. rq->ls.load_update_start = now;
  5479. for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
  5480. rq->cpu_load[j] = 0;
  5481. #ifdef CONFIG_SMP
  5482. rq->sd = NULL;
  5483. rq->active_balance = 0;
  5484. rq->next_balance = jiffies;
  5485. rq->push_cpu = 0;
  5486. rq->cpu = i;
  5487. rq->migration_thread = NULL;
  5488. INIT_LIST_HEAD(&rq->migration_queue);
  5489. #endif
  5490. atomic_set(&rq->nr_iowait, 0);
  5491. array = &rq->rt.active;
  5492. for (j = 0; j < MAX_RT_PRIO; j++) {
  5493. INIT_LIST_HEAD(array->queue + j);
  5494. __clear_bit(j, array->bitmap);
  5495. }
  5496. highest_cpu = i;
  5497. /* delimiter for bitsearch: */
  5498. __set_bit(MAX_RT_PRIO, array->bitmap);
  5499. }
  5500. set_load_weight(&init_task);
  5501. #ifdef CONFIG_PREEMPT_NOTIFIERS
  5502. INIT_HLIST_HEAD(&init_task.preempt_notifiers);
  5503. #endif
  5504. #ifdef CONFIG_SMP
  5505. nr_cpu_ids = highest_cpu + 1;
  5506. open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
  5507. #endif
  5508. #ifdef CONFIG_RT_MUTEXES
  5509. plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
  5510. #endif
  5511. /*
  5512. * The boot idle thread does lazy MMU switching as well:
  5513. */
  5514. atomic_inc(&init_mm.mm_count);
  5515. enter_lazy_tlb(&init_mm, current);
  5516. /*
  5517. * Make us the idle thread. Technically, schedule() should not be
  5518. * called from this thread, however somewhere below it might be,
  5519. * but because we are the idle thread, we just pick up running again
  5520. * when this runqueue becomes "idle".
  5521. */
  5522. init_idle(current, smp_processor_id());
  5523. /*
  5524. * During early bootup we pretend to be a normal task:
  5525. */
  5526. current->sched_class = &fair_sched_class;
  5527. }
  5528. #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
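/*
 * Complain (rate-limited to once per second) when a function that may
 * sleep is called from atomic context or with interrupts disabled while
 * the system is running.
 */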
  5529. void __might_sleep(char *file, int line)
  5530. {
  5531. #ifdef in_atomic
  5532. static unsigned long prev_jiffy; /* ratelimiting */
  5533. if ((in_atomic() || irqs_disabled()) &&
  5534. system_state == SYSTEM_RUNNING && !oops_in_progress) {
  5535. if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
  5536. return;
  5537. prev_jiffy = jiffies;
  5538. printk(KERN_ERR "BUG: sleeping function called from invalid"
  5539. " context at %s:%d\n", file, line);
  5540. printk("in_atomic():%d, irqs_disabled():%d\n",
  5541. in_atomic(), irqs_disabled());
  5542. debug_show_held_locks(current);
  5543. if (irqs_disabled())
  5544. print_irqtrace_events(current);
  5545. dump_stack();
  5546. }
  5547. #endif
  5548. }
  5549. EXPORT_SYMBOL(__might_sleep);
  5550. #endif
  5551. #ifdef CONFIG_MAGIC_SYSRQ
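/*
 * Used by the magic SysRq code: reset every task's CFS statistics, renice
 * negative-nice user tasks back to 0, and put all real-time tasks back on
 * SCHED_NORMAL (the per-CPU migration threads are left alone).
 */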
  5552. void normalize_rt_tasks(void)
  5553. {
  5554. struct task_struct *g, *p;
  5555. unsigned long flags;
  5556. struct rq *rq;
  5557. int on_rq;
  5558. read_lock_irq(&tasklist_lock);
  5559. do_each_thread(g, p) {
  5560. p->se.fair_key = 0;
  5561. p->se.wait_runtime = 0;
  5562. p->se.wait_start_fair = 0;
  5563. p->se.wait_start = 0;
  5564. p->se.exec_start = 0;
  5565. p->se.sleep_start = 0;
  5566. p->se.sleep_start_fair = 0;
  5567. p->se.block_start = 0;
  5568. task_rq(p)->cfs.fair_clock = 0;
  5569. task_rq(p)->clock = 0;
  5570. if (!rt_task(p)) {
  5571. /*
  5572. * Renice negative nice level userspace
  5573. * tasks back to 0:
  5574. */
  5575. if (TASK_NICE(p) < 0 && p->mm)
  5576. set_user_nice(p, 0);
  5577. continue;
  5578. }
  5579. spin_lock_irqsave(&p->pi_lock, flags);
  5580. rq = __task_rq_lock(p);
  5581. #ifdef CONFIG_SMP
  5582. /*
  5583. * Do not touch the migration thread:
  5584. */
  5585. if (p == rq->migration_thread)
  5586. goto out_unlock;
  5587. #endif
  5588. on_rq = p->se.on_rq;
  5589. if (on_rq)
  5590. deactivate_task(task_rq(p), p, 0);
  5591. __setscheduler(rq, p, SCHED_NORMAL, 0);
  5592. if (on_rq) {
  5593. activate_task(task_rq(p), p, 0);
  5594. resched_task(rq->curr);
  5595. }
  5596. #ifdef CONFIG_SMP
  5597. out_unlock:
  5598. #endif
  5599. __task_rq_unlock(rq);
  5600. spin_unlock_irqrestore(&p->pi_lock, flags);
  5601. } while_each_thread(g, p);
  5602. read_unlock_irq(&tasklist_lock);
  5603. }
  5604. #endif /* CONFIG_MAGIC_SYSRQ */
  5605. #ifdef CONFIG_IA64
  5606. /*
  5607. * These functions are only useful for the IA64 MCA handling.
  5608. *
  5609. * They can only be called when the whole system has been
  5610. * stopped - every CPU needs to be quiescent, and no scheduling
  5611. * activity can take place. Using them for anything else would
  5612. * be a serious bug, and as a result, they aren't even visible
  5613. * under any other configuration.
  5614. */
  5615. /**
  5616. * curr_task - return the current task for a given cpu.
  5617. * @cpu: the processor in question.
  5618. *
  5619. * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
  5620. */
  5621. struct task_struct *curr_task(int cpu)
  5622. {
  5623. return cpu_curr(cpu);
  5624. }
  5625. /**
  5626. * set_curr_task - set the current task for a given cpu.
  5627. * @cpu: the processor in question.
  5628. * @p: the task pointer to set.
  5629. *
  5630. * Description: This function must only be used when non-maskable interrupts
  5631. * are serviced on a separate stack. It allows the architecture to switch the
  5632. * notion of the current task on a cpu in a non-blocking manner. This function
 * must be called with all CPUs synchronized and interrupts disabled; the
 * caller must save the original value of the current task (see
  5635. * curr_task() above) and restore that value before reenabling interrupts and
  5636. * re-starting the system.
  5637. *
  5638. * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
  5639. */
  5640. void set_curr_task(int cpu, struct task_struct *p)
  5641. {
  5642. cpu_curr(cpu) = p;
  5643. }
  5644. #endif