sched.c 218 KB

/*
 *  kernel/sched.c
 *
 *  Kernel scheduler and related syscalls
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 *
 *  1996-12-23  Modified by Dave Grothe to fix bugs in semaphores and
 *              make semaphores SMP safe
 *  1998-11-19  Implemented schedule_timeout() and related stuff
 *              by Andrea Arcangeli
 *  2002-01-04  New ultra-scalable O(1) scheduler by Ingo Molnar:
 *              hybrid priority-list and round-robin design with
 *              an array-switch method of distributing timeslices
 *              and per-CPU runqueues.  Cleanups and useful suggestions
 *              by Davide Libenzi, preemptible kernel bits by Robert Love.
 *  2003-09-03  Interactivity tuning by Con Kolivas.
 *  2004-04-02  Scheduler domains code by Nick Piggin
 *  2007-04-15  Work begun on replacing all interactivity tuning with a
 *              fair scheduling design by Con Kolivas.
 *  2007-05-05  Load balancing (smp-nice) and other improvements
 *              by Peter Williams
 *  2007-05-06  Interactivity improvements to CFS by Mike Galbraith
 *  2007-07-01  Group scheduling enhancements by Srivatsa Vaddagiri
 *  2007-11-29  RT balancing improvements by Steven Rostedt, Gregory Haskins,
 *              Thomas Gleixner, Mike Kravetz
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>
#include <asm/mmu_context.h>
#include <linux/interrupt.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/kernel_stat.h>
#include <linux/debug_locks.h>
#include <linux/perf_event.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/profile.h>
#include <linux/freezer.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/pid_namespace.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/timer.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/percpu.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/tsacct_kern.h>
#include <linux/kprobes.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/ftrace.h>

#include <asm/tlb.h>
#include <asm/irq_regs.h>

#include "sched_cpupri.h"

#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
/*
 * Convert user-nice values [ -20 ... 0 ... 19 ]
 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
 * and back.
 */
#define NICE_TO_PRIO(nice)      (MAX_RT_PRIO + (nice) + 20)
#define PRIO_TO_NICE(prio)      ((prio) - MAX_RT_PRIO - 20)
#define TASK_NICE(p)            PRIO_TO_NICE((p)->static_prio)
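/*
 * Worked example (added for illustration; assumes the stock values
 * MAX_RT_PRIO == 100 and MAX_PRIO == 140, which are defined elsewhere
 * and not in this file):
 *
 *      NICE_TO_PRIO(-20) == 100        (highest non-RT priority)
 *      NICE_TO_PRIO(0)   == 120        (the default)
 *      NICE_TO_PRIO(19)  == 139        (lowest priority, MAX_PRIO-1)
 *      PRIO_TO_NICE(120) == 0
 */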
/*
 * 'User priority' is the nice value converted to something we
 * can work with better when scaling various scheduler parameters;
 * it's a [ 0 ... 39 ] range.
 */
#define USER_PRIO(p)            ((p)-MAX_RT_PRIO)
#define TASK_USER_PRIO(p)       USER_PRIO((p)->static_prio)
#define MAX_USER_PRIO           (USER_PRIO(MAX_PRIO))
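/*
 * Illustration (not in the original source; same assumed constants as
 * above): a default task has static_prio == 120, so TASK_USER_PRIO()
 * returns 20, and MAX_USER_PRIO == USER_PRIO(140) == 40.
 */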
/*
 * Helpers for converting nanosecond timing to jiffy resolution
 */
#define NS_TO_JIFFIES(TIME)     ((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
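/*
 * Example (illustrative only; HZ is a build-time config choice): with
 * HZ == 1000, one jiffy is NSEC_PER_SEC / HZ == 1,000,000 ns, so
 * NS_TO_JIFFIES(5000000) == 5.
 */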
#define NICE_0_LOAD             SCHED_LOAD_SCALE
#define NICE_0_SHIFT            SCHED_LOAD_SHIFT

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * default timeslice is 100 msecs (used only for SCHED_RR tasks).
 * Timeslices get refilled after they expire.
 */
#define DEF_TIMESLICE           (100 * HZ / 1000)
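/*
 * Worked value (illustrative; depends on the HZ config): with HZ == 250,
 * DEF_TIMESLICE == 100 * 250 / 1000 == 25 jiffies, i.e. 100 ms.
 */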
/*
 * single value that denotes runtime == period, ie unlimited time.
 */
#define RUNTIME_INF     ((u64)~0ULL)

static inline int rt_policy(int policy)
{
        if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
                return 1;
        return 0;
}

static inline int task_has_rt_policy(struct task_struct *p)
{
        return rt_policy(p->policy);
}

/*
 * This is the priority-queue data structure of the RT scheduling class:
 */
struct rt_prio_array {
        DECLARE_BITMAP(bitmap, MAX_RT_PRIO+1); /* include 1 bit for delimiter */
        struct list_head queue[MAX_RT_PRIO];
};

struct rt_bandwidth {
        /* nests inside the rq lock: */
        raw_spinlock_t          rt_runtime_lock;
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
};

static struct rt_bandwidth def_rt_bandwidth;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
        ktime_t now;
        int overrun;
        int idle = 0;

        for (;;) {
                now = hrtimer_cb_get_time(timer);
                overrun = hrtimer_forward(timer, now, rt_b->rt_period);

                if (!overrun)
                        break;

                idle = do_sched_rt_period_timer(rt_b, overrun);
        }

        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

static
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
        rt_b->rt_period = ns_to_ktime(period);
        rt_b->rt_runtime = runtime;

        raw_spin_lock_init(&rt_b->rt_runtime_lock);

        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
}
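/*
 * Usage sketch (added for illustration; the call that actually arms the
 * default bandwidth object lives in sched_init(), later in this file and
 * outside this excerpt, and looks roughly like):
 *
 *      init_rt_bandwidth(&def_rt_bandwidth,
 *                      global_rt_period(), global_rt_runtime());
 *
 * i.e. the period and runtime come from the sysctl-derived helpers
 * defined further below.
 */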
static inline int rt_bandwidth_enabled(void)
{
        return sysctl_sched_rt_runtime >= 0;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        ktime_t now;

        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;

        if (hrtimer_active(&rt_b->rt_period_timer))
                return;

        raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
                unsigned long delta;
                ktime_t soft, hard;

                if (hrtimer_active(&rt_b->rt_period_timer))
                        break;

                now = hrtimer_cb_get_time(&rt_b->rt_period_timer);
                hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period);

                soft = hrtimer_get_softexpires(&rt_b->rt_period_timer);
                hard = hrtimer_get_expires(&rt_b->rt_period_timer);
                delta = ktime_to_ns(ktime_sub(hard, soft));
                __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
                                HRTIMER_MODE_ABS_PINNED, 0);
        }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
}

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
        hrtimer_cancel(&rt_b->rt_period_timer);
}
#endif

/*
 * sched_domains_mutex serializes calls to arch_init_sched_domains,
 * detach_destroy_domains and partition_sched_domains.
 */
static DEFINE_MUTEX(sched_domains_mutex);

#ifdef CONFIG_CGROUP_SCHED

#include <linux/cgroup.h>

struct cfs_rq;

static LIST_HEAD(task_groups);

/* task group related information */
struct task_group {
        struct cgroup_subsys_state css;

#ifdef CONFIG_FAIR_GROUP_SCHED
        /* schedulable entities of this group on each cpu */
        struct sched_entity **se;
        /* runqueue "owned" by this group on each cpu */
        struct cfs_rq **cfs_rq;
        unsigned long shares;
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        struct sched_rt_entity **rt_se;
        struct rt_rq **rt_rq;

        struct rt_bandwidth rt_bandwidth;
#endif

        struct rcu_head rcu;
        struct list_head list;

        struct task_group *parent;
        struct list_head siblings;
        struct list_head children;
};

#define root_task_group init_task_group

/* task_group_lock serializes add/remove of task groups and also changes to
 * a task group's cpu shares.
 */
static DEFINE_SPINLOCK(task_group_lock);

#ifdef CONFIG_FAIR_GROUP_SCHED

#ifdef CONFIG_SMP
static int root_task_group_empty(void)
{
        return list_empty(&root_task_group.children);
}
#endif

# define INIT_TASK_GROUP_LOAD   NICE_0_LOAD
/*
 * A weight of 0 or 1 can cause arithmetic problems.
 * The weight of a cfs_rq is the sum of the weights of the entities
 * queued on it, so the weight of an entity - and likewise the shares
 * value of a task group - should not be too large.
 * (The default weight is 1024 - so there's no practical
 * limitation from this.)
 */
#define MIN_SHARES      2
#define MAX_SHARES      (1UL << 18)

static int init_task_group_load = INIT_TASK_GROUP_LOAD;
#endif
/* Default task group.
 * Every task in the system belongs to this group at bootup.
 */
struct task_group init_task_group;

/* return group to which a task belongs */
static inline struct task_group *task_group(struct task_struct *p)
{
        struct task_group *tg;

#ifdef CONFIG_CGROUP_SCHED
        tg = container_of(task_subsys_state(p, cpu_cgroup_subsys_id),
                                struct task_group, css);
#else
        tg = &init_task_group;
#endif
        return tg;
}

/* Change a task's cfs_rq and parent entity if it moves across CPUs/groups */
static inline void set_task_rq(struct task_struct *p, unsigned int cpu)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
        p->se.cfs_rq = task_group(p)->cfs_rq[cpu];
        p->se.parent = task_group(p)->se[cpu];
#endif

#ifdef CONFIG_RT_GROUP_SCHED
        p->rt.rt_rq  = task_group(p)->rt_rq[cpu];
        p->rt.parent = task_group(p)->rt_se[cpu];
#endif
}

#else

static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
static inline struct task_group *task_group(struct task_struct *p)
{
        return NULL;
}

#endif  /* CONFIG_CGROUP_SCHED */

/* CFS-related fields in a runqueue */
struct cfs_rq {
        struct load_weight load;
        unsigned long nr_running;

        u64 exec_clock;
        u64 min_vruntime;

        struct rb_root tasks_timeline;
        struct rb_node *rb_leftmost;

        struct list_head tasks;
        struct list_head *balance_iterator;

        /*
         * 'curr' points to currently running entity on this cfs_rq.
         * It is set to NULL otherwise (i.e when none are currently running).
         */
        struct sched_entity *curr, *next, *last;

        unsigned int nr_spread_over;

#ifdef CONFIG_FAIR_GROUP_SCHED
        struct rq *rq;  /* cpu runqueue to which this cfs_rq is attached */

        /*
         * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
         * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
         * (like users, containers etc.)
         *
         * leaf_cfs_rq_list ties together list of leaf cfs_rq's in a cpu. This
         * list is used during load balance.
         */
        struct list_head leaf_cfs_rq_list;
        struct task_group *tg;  /* group that "owns" this runqueue */

#ifdef CONFIG_SMP
        /*
         * the part of load.weight contributed by tasks
         */
        unsigned long task_weight;

        /*
         * h_load = weight * f(tg)
         *
         * Where f(tg) is the recursive weight fraction assigned to
         * this group.
         */
        unsigned long h_load;

        /*
         * this cpu's part of tg->shares
         */
        unsigned long shares;

        /*
         * load.weight at the time we set shares
         */
        unsigned long rq_weight;
#endif
#endif
};
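/*
 * Worked example of the h_load formula above (illustrative numbers, not
 * taken from this file): if a group is entitled to one quarter of the
 * weight all the way up its hierarchy (f(tg) == 0.25) and its cfs_rq has
 * load.weight == 2048, then h_load == 2048 * 0.25 == 512, i.e. the load
 * this runqueue contributes once the hierarchy is flattened.
 */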
/* Real-Time classes' related field in a runqueue: */
struct rt_rq {
        struct rt_prio_array active;
        unsigned long rt_nr_running;
#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
        struct {
                int curr; /* highest queued rt task prio */
#ifdef CONFIG_SMP
                int next; /* next highest */
#endif
        } highest_prio;
#endif
#ifdef CONFIG_SMP
        unsigned long rt_nr_migratory;
        unsigned long rt_nr_total;
        int overloaded;
        struct plist_head pushable_tasks;
#endif
        int rt_throttled;
        u64 rt_time;
        u64 rt_runtime;
        /* Nests inside the rq lock: */
        raw_spinlock_t rt_runtime_lock;

#ifdef CONFIG_RT_GROUP_SCHED
        unsigned long rt_nr_boosted;

        struct rq *rq;
        struct list_head leaf_rt_rq_list;
        struct task_group *tg;
#endif
};

#ifdef CONFIG_SMP

/*
 * We add the notion of a root-domain which will be used to define per-domain
 * variables. Each exclusive cpuset essentially defines an island domain by
 * fully partitioning the member cpus from any other cpuset. Whenever a new
 * exclusive cpuset is created, we also create and attach a new root-domain
 * object.
 */
struct root_domain {
        atomic_t refcount;
        cpumask_var_t span;
        cpumask_var_t online;

        /*
         * The "RT overload" flag: it gets set if a CPU has more than
         * one runnable RT task.
         */
        cpumask_var_t rto_mask;
        atomic_t rto_count;
#ifdef CONFIG_SMP
        struct cpupri cpupri;
#endif
};

/*
 * By default the system creates a single root-domain with all cpus as
 * members (mimicking the global state we have today).
 */
static struct root_domain def_root_domain;

#endif
/*
 * This is the main, per-CPU runqueue data structure.
 *
 * Locking rule: code that needs to lock multiple runqueues at once
 * (such as the load balancing or the thread migration code) must
 * acquire the locks in ascending &runqueue order.
 */
struct rq {
        /* runqueue lock: */
        raw_spinlock_t lock;

        /*
         * nr_running and cpu_load should be in the same cacheline because
         * remote CPUs use both these fields when doing load calculation.
         */
        unsigned long nr_running;
        #define CPU_LOAD_IDX_MAX 5
        unsigned long cpu_load[CPU_LOAD_IDX_MAX];
#ifdef CONFIG_NO_HZ
        unsigned char in_nohz_recently;
#endif
        /* capture load from *all* tasks on this cpu: */
        struct load_weight load;
        unsigned long nr_load_updates;
        u64 nr_switches;

        struct cfs_rq cfs;
        struct rt_rq rt;

#ifdef CONFIG_FAIR_GROUP_SCHED
        /* list of leaf cfs_rq on this cpu: */
        struct list_head leaf_cfs_rq_list;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
        struct list_head leaf_rt_rq_list;
#endif

        /*
         * This is part of a global counter where only the total sum
         * over all CPUs matters. A task can increase this counter on
         * one CPU and if it got migrated afterwards it may decrease
         * it on another CPU. Always updated under the runqueue lock:
         */
        unsigned long nr_uninterruptible;

        struct task_struct *curr, *idle;
        unsigned long next_balance;
        struct mm_struct *prev_mm;

        u64 clock;

        atomic_t nr_iowait;

#ifdef CONFIG_SMP
        struct root_domain *rd;
        struct sched_domain *sd;

        unsigned char idle_at_tick;
        /* For active balancing */
        int post_schedule;
        int active_balance;
        int push_cpu;
        /* cpu of this runqueue: */
        int cpu;
        int online;

        unsigned long avg_load_per_task;

        struct task_struct *migration_thread;
        struct list_head migration_queue;

        u64 rt_avg;
        u64 age_stamp;
        u64 idle_stamp;
        u64 avg_idle;
#endif

        /* calc_load related fields */
        unsigned long calc_load_update;
        long calc_load_active;

#ifdef CONFIG_SCHED_HRTICK
#ifdef CONFIG_SMP
        int hrtick_csd_pending;
        struct call_single_data hrtick_csd;
#endif
        struct hrtimer hrtick_timer;
#endif

#ifdef CONFIG_SCHEDSTATS
        /* latency stats */
        struct sched_info rq_sched_info;
        unsigned long long rq_cpu_time;
        /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */

        /* sys_sched_yield() stats */
        unsigned int yld_count;

        /* schedule() stats */
        unsigned int sched_switch;
        unsigned int sched_count;
        unsigned int sched_goidle;

        /* try_to_wake_up() stats */
        unsigned int ttwu_count;
        unsigned int ttwu_local;

        /* BKL stats */
        unsigned int bkl_count;
#endif
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

static inline
void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
{
        rq->curr->sched_class->check_preempt_curr(rq, p, flags);
}

static inline int cpu_of(struct rq *rq)
{
#ifdef CONFIG_SMP
        return rq->cpu;
#else
        return 0;
#endif
}

/*
 * The domain tree (rq->sd) is protected by RCU's quiescent state transition.
 * See detach_destroy_domains: synchronize_sched for details.
 *
 * The domain tree of any CPU may only be accessed from within
 * preempt-disabled sections.
 */
#define for_each_domain(cpu, __sd) \
        for (__sd = rcu_dereference(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent)

#define cpu_rq(cpu)             (&per_cpu(runqueues, (cpu)))
#define this_rq()               (&__get_cpu_var(runqueues))
#define task_rq(p)              cpu_rq(task_cpu(p))
#define cpu_curr(cpu)           (cpu_rq(cpu)->curr)
#define raw_rq()                (&__raw_get_cpu_var(runqueues))
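/*
 * Usage sketch for the accessors above (illustrative only, not a call
 * site from this file):
 *
 *      struct rq *rq = task_rq(p);     // runqueue the task is attached to
 *      struct sched_domain *sd;
 *
 *      preempt_disable();              // for_each_domain() requires it
 *      for_each_domain(cpu_of(rq), sd) {
 *              // walk from the base domain up to the topmost parent
 *      }
 *      preempt_enable();
 */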
inline void update_rq_clock(struct rq *rq)
{
        rq->clock = sched_clock_cpu(cpu_of(rq));
}

/*
 * Tunables that become constants when CONFIG_SCHED_DEBUG is off:
 */
#ifdef CONFIG_SCHED_DEBUG
# define const_debug __read_mostly
#else
# define const_debug static const
#endif

/**
 * runqueue_is_locked
 * @cpu: the processor in question.
 *
 * Returns true if the current cpu runqueue is locked.
 * This interface allows printk to be called with the runqueue lock
 * held and know whether or not it is OK to wake up the klogd.
 */
int runqueue_is_locked(int cpu)
{
        return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}

/*
 * Debugging: various feature bits
 */

#define SCHED_FEAT(name, enabled)       \
        __SCHED_FEAT_##name ,

enum {
#include "sched_features.h"
};

#undef SCHED_FEAT

#define SCHED_FEAT(name, enabled)       \
        (1UL << __SCHED_FEAT_##name) * enabled |

const_debug unsigned int sysctl_sched_features =
#include "sched_features.h"
        0;

#undef SCHED_FEAT
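/*
 * How the two SCHED_FEAT() expansions above fit together (illustrative;
 * the real feature names live in sched_features.h and are not shown in
 * this excerpt).  Given hypothetical entries
 *
 *      SCHED_FEAT(FOO, 1)
 *      SCHED_FEAT(BAR, 0)
 *
 * the first include builds the enum { __SCHED_FEAT_FOO, __SCHED_FEAT_BAR }
 * and the second builds the default bitmask:
 *
 *      sysctl_sched_features =
 *              (1UL << __SCHED_FEAT_FOO) * 1 |
 *              (1UL << __SCHED_FEAT_BAR) * 0 |
 *              0;
 */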
  555. #ifdef CONFIG_SCHED_DEBUG
  556. #define SCHED_FEAT(name, enabled) \
  557. #name ,
  558. static __read_mostly char *sched_feat_names[] = {
  559. #include "sched_features.h"
  560. NULL
  561. };
  562. #undef SCHED_FEAT
  563. static int sched_feat_show(struct seq_file *m, void *v)
  564. {
  565. int i;
  566. for (i = 0; sched_feat_names[i]; i++) {
  567. if (!(sysctl_sched_features & (1UL << i)))
  568. seq_puts(m, "NO_");
  569. seq_printf(m, "%s ", sched_feat_names[i]);
  570. }
  571. seq_puts(m, "\n");
  572. return 0;
  573. }
  574. static ssize_t
  575. sched_feat_write(struct file *filp, const char __user *ubuf,
  576. size_t cnt, loff_t *ppos)
  577. {
  578. char buf[64];
  579. char *cmp = buf;
  580. int neg = 0;
  581. int i;
  582. if (cnt > 63)
  583. cnt = 63;
  584. if (copy_from_user(&buf, ubuf, cnt))
  585. return -EFAULT;
  586. buf[cnt] = 0;
  587. if (strncmp(buf, "NO_", 3) == 0) {
  588. neg = 1;
  589. cmp += 3;
  590. }
  591. for (i = 0; sched_feat_names[i]; i++) {
  592. int len = strlen(sched_feat_names[i]);
  593. if (strncmp(cmp, sched_feat_names[i], len) == 0) {
  594. if (neg)
  595. sysctl_sched_features &= ~(1UL << i);
  596. else
  597. sysctl_sched_features |= (1UL << i);
  598. break;
  599. }
  600. }
  601. if (!sched_feat_names[i])
  602. return -EINVAL;
  603. *ppos += cnt;
  604. return cnt;
  605. }
  606. static int sched_feat_open(struct inode *inode, struct file *filp)
  607. {
  608. return single_open(filp, sched_feat_show, NULL);
  609. }
  610. static const struct file_operations sched_feat_fops = {
  611. .open = sched_feat_open,
  612. .write = sched_feat_write,
  613. .read = seq_read,
  614. .llseek = seq_lseek,
  615. .release = single_release,
  616. };
  617. static __init int sched_init_debug(void)
  618. {
  619. debugfs_create_file("sched_features", 0644, NULL, NULL,
  620. &sched_feat_fops);
  621. return 0;
  622. }
  623. late_initcall(sched_init_debug);
  624. #endif
  625. #define sched_feat(x) (sysctl_sched_features & (1UL << __SCHED_FEAT_##x))
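/*
 * Illustration (not part of the build): the SCHED_FEAT() X-macro trick above
 * includes sched_features.h twice with different definitions. Assuming the
 * header contained two hypothetical entries
 *
 *	SCHED_FEAT(FOO, 1)
 *	SCHED_FEAT(BAR, 0)
 *
 * the first include would expand to the enum
 *
 *	enum { __SCHED_FEAT_FOO, __SCHED_FEAT_BAR };
 *
 * and the second include to the default bitmask
 *
 *	const_debug unsigned int sysctl_sched_features =
 *		(1UL << __SCHED_FEAT_FOO) * 1 |
 *		(1UL << __SCHED_FEAT_BAR) * 0 |
 *		0;
 *
 * so sched_feat(FOO) tests bit 0 and sched_feat(BAR) tests bit 1.
 */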
  626. /*
  627. * Number of tasks to iterate in a single balance run.
  628. * Limited because this is done with IRQs disabled.
  629. */
  630. const_debug unsigned int sysctl_sched_nr_migrate = 32;
  631. /*
  632. * ratelimit for updating the group shares.
  633. * default: 0.25ms
  634. */
  635. unsigned int sysctl_sched_shares_ratelimit = 250000;
  636. unsigned int normalized_sysctl_sched_shares_ratelimit = 250000;
  637. /*
  638. * Inject some fuzzyness into changing the per-cpu group shares
  639. * this avoids remote rq-locks at the expense of fairness.
  640. * default: 4
  641. */
  642. unsigned int sysctl_sched_shares_thresh = 4;
  643. /*
  644. * period over which we average the RT time consumption, measured
  645. * in ms.
  646. *
  647. * default: 1s
  648. */
  649. const_debug unsigned int sysctl_sched_time_avg = MSEC_PER_SEC;
  650. /*
  651. * period over which we measure -rt task cpu usage in us.
  652. * default: 1s
  653. */
  654. unsigned int sysctl_sched_rt_period = 1000000;
  655. static __read_mostly int scheduler_running;
  656. /*
  657. * part of the period that we allow rt tasks to run in us.
  658. * default: 0.95s
  659. */
  660. int sysctl_sched_rt_runtime = 950000;
  661. static inline u64 global_rt_period(void)
  662. {
  663. return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
  664. }
  665. static inline u64 global_rt_runtime(void)
  666. {
  667. if (sysctl_sched_rt_runtime < 0)
  668. return RUNTIME_INF;
  669. return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
  670. }
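/*
 * Example with the defaults above: sched_rt_period = 1000000 us and
 * sched_rt_runtime = 950000 us mean realtime tasks may consume at most
 * 0.95 s of every 1 s period, leaving the remaining 5% for SCHED_NORMAL
 * tasks. Setting sysctl_sched_rt_runtime to -1 disables the throttling
 * (global_rt_runtime() then returns RUNTIME_INF).
 */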
  671. #ifndef prepare_arch_switch
  672. # define prepare_arch_switch(next) do { } while (0)
  673. #endif
  674. #ifndef finish_arch_switch
  675. # define finish_arch_switch(prev) do { } while (0)
  676. #endif
  677. static inline int task_current(struct rq *rq, struct task_struct *p)
  678. {
  679. return rq->curr == p;
  680. }
  681. #ifndef __ARCH_WANT_UNLOCKED_CTXSW
  682. static inline int task_running(struct rq *rq, struct task_struct *p)
  683. {
  684. return task_current(rq, p);
  685. }
  686. static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
  687. {
  688. }
  689. static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
  690. {
  691. #ifdef CONFIG_DEBUG_SPINLOCK
  692. /* this is a valid case when another task releases the spinlock */
  693. rq->lock.owner = current;
  694. #endif
  695. /*
  696. * If we are tracking spinlock dependencies then we have to
  697. * fix up the runqueue lock - which gets 'carried over' from
  698. * prev into current:
  699. */
  700. spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
  701. raw_spin_unlock_irq(&rq->lock);
  702. }
  703. #else /* __ARCH_WANT_UNLOCKED_CTXSW */
  704. static inline int task_running(struct rq *rq, struct task_struct *p)
  705. {
  706. #ifdef CONFIG_SMP
  707. return p->oncpu;
  708. #else
  709. return task_current(rq, p);
  710. #endif
  711. }
  712. static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
  713. {
  714. #ifdef CONFIG_SMP
  715. /*
  716. * We can optimise this out completely for !SMP, because the
  717. * SMP rebalancing from interrupt is the only thing that cares
  718. * here.
  719. */
  720. next->oncpu = 1;
  721. #endif
  722. #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
  723. raw_spin_unlock_irq(&rq->lock);
  724. #else
  725. raw_spin_unlock(&rq->lock);
  726. #endif
  727. }
  728. static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
  729. {
  730. #ifdef CONFIG_SMP
  731. /*
  732. * After ->oncpu is cleared, the task can be moved to a different CPU.
  733. * We must ensure this doesn't happen until the switch is completely
  734. * finished.
  735. */
  736. smp_wmb();
  737. prev->oncpu = 0;
  738. #endif
  739. #ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW
  740. local_irq_enable();
  741. #endif
  742. }
  743. #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
  744. /*
745. * Check whether the task is waking; we use this to synchronize against
  746. * ttwu() so that task_cpu() reports a stable number.
  747. *
  748. * We need to make an exception for PF_STARTING tasks because the fork
  749. * path might require task_rq_lock() to work, eg. it can call
  750. * set_cpus_allowed_ptr() from the cpuset clone_ns code.
  751. */
  752. static inline int task_is_waking(struct task_struct *p)
  753. {
  754. return unlikely((p->state == TASK_WAKING) && !(p->flags & PF_STARTING));
  755. }
  756. /*
  757. * __task_rq_lock - lock the runqueue a given task resides on.
758. * Must be called with interrupts disabled.
  759. */
  760. static inline struct rq *__task_rq_lock(struct task_struct *p)
  761. __acquires(rq->lock)
  762. {
  763. struct rq *rq;
  764. for (;;) {
  765. while (task_is_waking(p))
  766. cpu_relax();
  767. rq = task_rq(p);
  768. raw_spin_lock(&rq->lock);
  769. if (likely(rq == task_rq(p) && !task_is_waking(p)))
  770. return rq;
  771. raw_spin_unlock(&rq->lock);
  772. }
  773. }
  774. /*
  775. * task_rq_lock - lock the runqueue a given task resides on and disable
  776. * interrupts. Note the ordering: we can safely lookup the task_rq without
  777. * explicitly disabling preemption.
  778. */
  779. static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
  780. __acquires(rq->lock)
  781. {
  782. struct rq *rq;
  783. for (;;) {
  784. while (task_is_waking(p))
  785. cpu_relax();
  786. local_irq_save(*flags);
  787. rq = task_rq(p);
  788. raw_spin_lock(&rq->lock);
  789. if (likely(rq == task_rq(p) && !task_is_waking(p)))
  790. return rq;
  791. raw_spin_unlock_irqrestore(&rq->lock, *flags);
  792. }
  793. }
  794. void task_rq_unlock_wait(struct task_struct *p)
  795. {
  796. struct rq *rq = task_rq(p);
  797. smp_mb(); /* spin-unlock-wait is not a full memory barrier */
  798. raw_spin_unlock_wait(&rq->lock);
  799. }
  800. static void __task_rq_unlock(struct rq *rq)
  801. __releases(rq->lock)
  802. {
  803. raw_spin_unlock(&rq->lock);
  804. }
  805. static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
  806. __releases(rq->lock)
  807. {
  808. raw_spin_unlock_irqrestore(&rq->lock, *flags);
  809. }
  810. /*
  811. * this_rq_lock - lock this runqueue and disable interrupts.
  812. */
  813. static struct rq *this_rq_lock(void)
  814. __acquires(rq->lock)
  815. {
  816. struct rq *rq;
  817. local_irq_disable();
  818. rq = this_rq();
  819. raw_spin_lock(&rq->lock);
  820. return rq;
  821. }
  822. #ifdef CONFIG_SCHED_HRTICK
  823. /*
  824. * Use HR-timers to deliver accurate preemption points.
  825. *
826. * It's all a bit involved since we cannot program an hrtimer while holding the
827. * rq->lock. So what we do is store a state in rq->hrtick_* and ask for a
  828. * reschedule event.
  829. *
  830. * When we get rescheduled we reprogram the hrtick_timer outside of the
  831. * rq->lock.
  832. */
  833. /*
  834. * Use hrtick when:
  835. * - enabled by features
  836. * - hrtimer is actually high res
  837. */
  838. static inline int hrtick_enabled(struct rq *rq)
  839. {
  840. if (!sched_feat(HRTICK))
  841. return 0;
  842. if (!cpu_active(cpu_of(rq)))
  843. return 0;
  844. return hrtimer_is_hres_active(&rq->hrtick_timer);
  845. }
  846. static void hrtick_clear(struct rq *rq)
  847. {
  848. if (hrtimer_active(&rq->hrtick_timer))
  849. hrtimer_cancel(&rq->hrtick_timer);
  850. }
  851. /*
  852. * High-resolution timer tick.
  853. * Runs from hardirq context with interrupts disabled.
  854. */
  855. static enum hrtimer_restart hrtick(struct hrtimer *timer)
  856. {
  857. struct rq *rq = container_of(timer, struct rq, hrtick_timer);
  858. WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
  859. raw_spin_lock(&rq->lock);
  860. update_rq_clock(rq);
  861. rq->curr->sched_class->task_tick(rq, rq->curr, 1);
  862. raw_spin_unlock(&rq->lock);
  863. return HRTIMER_NORESTART;
  864. }
  865. #ifdef CONFIG_SMP
  866. /*
  867. * called from hardirq (IPI) context
  868. */
  869. static void __hrtick_start(void *arg)
  870. {
  871. struct rq *rq = arg;
  872. raw_spin_lock(&rq->lock);
  873. hrtimer_restart(&rq->hrtick_timer);
  874. rq->hrtick_csd_pending = 0;
  875. raw_spin_unlock(&rq->lock);
  876. }
  877. /*
  878. * Called to set the hrtick timer state.
  879. *
  880. * called with rq->lock held and irqs disabled
  881. */
  882. static void hrtick_start(struct rq *rq, u64 delay)
  883. {
  884. struct hrtimer *timer = &rq->hrtick_timer;
  885. ktime_t time = ktime_add_ns(timer->base->get_time(), delay);
  886. hrtimer_set_expires(timer, time);
  887. if (rq == this_rq()) {
  888. hrtimer_restart(timer);
  889. } else if (!rq->hrtick_csd_pending) {
  890. __smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
  891. rq->hrtick_csd_pending = 1;
  892. }
  893. }
  894. static int
  895. hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
  896. {
  897. int cpu = (int)(long)hcpu;
  898. switch (action) {
  899. case CPU_UP_CANCELED:
  900. case CPU_UP_CANCELED_FROZEN:
  901. case CPU_DOWN_PREPARE:
  902. case CPU_DOWN_PREPARE_FROZEN:
  903. case CPU_DEAD:
  904. case CPU_DEAD_FROZEN:
  905. hrtick_clear(cpu_rq(cpu));
  906. return NOTIFY_OK;
  907. }
  908. return NOTIFY_DONE;
  909. }
  910. static __init void init_hrtick(void)
  911. {
  912. hotcpu_notifier(hotplug_hrtick, 0);
  913. }
  914. #else
  915. /*
  916. * Called to set the hrtick timer state.
  917. *
  918. * called with rq->lock held and irqs disabled
  919. */
  920. static void hrtick_start(struct rq *rq, u64 delay)
  921. {
  922. __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
  923. HRTIMER_MODE_REL_PINNED, 0);
  924. }
  925. static inline void init_hrtick(void)
  926. {
  927. }
  928. #endif /* CONFIG_SMP */
  929. static void init_rq_hrtick(struct rq *rq)
  930. {
  931. #ifdef CONFIG_SMP
  932. rq->hrtick_csd_pending = 0;
  933. rq->hrtick_csd.flags = 0;
  934. rq->hrtick_csd.func = __hrtick_start;
  935. rq->hrtick_csd.info = rq;
  936. #endif
  937. hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  938. rq->hrtick_timer.function = hrtick;
  939. }
  940. #else /* CONFIG_SCHED_HRTICK */
  941. static inline void hrtick_clear(struct rq *rq)
  942. {
  943. }
  944. static inline void init_rq_hrtick(struct rq *rq)
  945. {
  946. }
  947. static inline void init_hrtick(void)
  948. {
  949. }
  950. #endif /* CONFIG_SCHED_HRTICK */
  951. /*
  952. * resched_task - mark a task 'to be rescheduled now'.
  953. *
  954. * On UP this means the setting of the need_resched flag, on SMP it
  955. * might also involve a cross-CPU call to trigger the scheduler on
  956. * the target CPU.
  957. */
  958. #ifdef CONFIG_SMP
  959. #ifndef tsk_is_polling
  960. #define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG)
  961. #endif
  962. static void resched_task(struct task_struct *p)
  963. {
  964. int cpu;
  965. assert_raw_spin_locked(&task_rq(p)->lock);
  966. if (test_tsk_need_resched(p))
  967. return;
  968. set_tsk_need_resched(p);
  969. cpu = task_cpu(p);
  970. if (cpu == smp_processor_id())
  971. return;
  972. /* NEED_RESCHED must be visible before we test polling */
  973. smp_mb();
  974. if (!tsk_is_polling(p))
  975. smp_send_reschedule(cpu);
  976. }
  977. static void resched_cpu(int cpu)
  978. {
  979. struct rq *rq = cpu_rq(cpu);
  980. unsigned long flags;
  981. if (!raw_spin_trylock_irqsave(&rq->lock, flags))
  982. return;
  983. resched_task(cpu_curr(cpu));
  984. raw_spin_unlock_irqrestore(&rq->lock, flags);
  985. }
  986. #ifdef CONFIG_NO_HZ
  987. /*
  988. * When add_timer_on() enqueues a timer into the timer wheel of an
  989. * idle CPU then this timer might expire before the next timer event
  990. * which is scheduled to wake up that CPU. In case of a completely
  991. * idle system the next event might even be infinite time into the
  992. * future. wake_up_idle_cpu() ensures that the CPU is woken up and
  993. * leaves the inner idle loop so the newly added timer is taken into
  994. * account when the CPU goes back to idle and evaluates the timer
  995. * wheel for the next timer event.
  996. */
  997. void wake_up_idle_cpu(int cpu)
  998. {
  999. struct rq *rq = cpu_rq(cpu);
  1000. if (cpu == smp_processor_id())
  1001. return;
  1002. /*
  1003. * This is safe, as this function is called with the timer
  1004. * wheel base lock of (cpu) held. When the CPU is on the way
  1005. * to idle and has not yet set rq->curr to idle then it will
  1006. * be serialized on the timer wheel base lock and take the new
  1007. * timer into account automatically.
  1008. */
  1009. if (rq->curr != rq->idle)
  1010. return;
  1011. /*
  1012. * We can set TIF_RESCHED on the idle task of the other CPU
  1013. * lockless. The worst case is that the other CPU runs the
  1014. * idle task through an additional NOOP schedule()
  1015. */
  1016. set_tsk_need_resched(rq->idle);
  1017. /* NEED_RESCHED must be visible before we test polling */
  1018. smp_mb();
  1019. if (!tsk_is_polling(rq->idle))
  1020. smp_send_reschedule(cpu);
  1021. }
  1022. #endif /* CONFIG_NO_HZ */
  1023. static u64 sched_avg_period(void)
  1024. {
  1025. return (u64)sysctl_sched_time_avg * NSEC_PER_MSEC / 2;
  1026. }
  1027. static void sched_avg_update(struct rq *rq)
  1028. {
  1029. s64 period = sched_avg_period();
  1030. while ((s64)(rq->clock - rq->age_stamp) > period) {
  1031. rq->age_stamp += period;
  1032. rq->rt_avg /= 2;
  1033. }
  1034. }
  1035. static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
  1036. {
  1037. rq->rt_avg += rt_delta;
  1038. sched_avg_update(rq);
  1039. }
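/*
 * Rough worked example (illustrative numbers only): with the default
 * sysctl_sched_time_avg of 1000 ms, sched_avg_period() is ~0.5 s. If an rq
 * has accumulated rt_avg = 400 ms of RT runtime and 1.5 s then pass without
 * an update, the while loop above advances age_stamp by three periods and
 * halves rt_avg three times: 400 -> 200 -> 100 -> 50 ms. So rt_avg behaves
 * as a geometrically decaying average of recent RT CPU consumption.
 */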
  1040. #else /* !CONFIG_SMP */
  1041. static void resched_task(struct task_struct *p)
  1042. {
  1043. assert_raw_spin_locked(&task_rq(p)->lock);
  1044. set_tsk_need_resched(p);
  1045. }
  1046. static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
  1047. {
  1048. }
  1049. #endif /* CONFIG_SMP */
  1050. #if BITS_PER_LONG == 32
  1051. # define WMULT_CONST (~0UL)
  1052. #else
  1053. # define WMULT_CONST (1UL << 32)
  1054. #endif
  1055. #define WMULT_SHIFT 32
  1056. /*
  1057. * Shift right and round:
  1058. */
  1059. #define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
  1060. /*
  1061. * delta *= weight / lw
  1062. */
  1063. static unsigned long
  1064. calc_delta_mine(unsigned long delta_exec, unsigned long weight,
  1065. struct load_weight *lw)
  1066. {
  1067. u64 tmp;
  1068. if (!lw->inv_weight) {
  1069. if (BITS_PER_LONG > 32 && unlikely(lw->weight >= WMULT_CONST))
  1070. lw->inv_weight = 1;
  1071. else
  1072. lw->inv_weight = 1 + (WMULT_CONST-lw->weight/2)
  1073. / (lw->weight+1);
  1074. }
  1075. tmp = (u64)delta_exec * weight;
  1076. /*
  1077. * Check whether we'd overflow the 64-bit multiplication:
  1078. */
  1079. if (unlikely(tmp > WMULT_CONST))
  1080. tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
  1081. WMULT_SHIFT/2);
  1082. else
  1083. tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
  1084. return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
  1085. }
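/*
 * Worked example for calc_delta_mine() (illustrative numbers): suppose a
 * task with weight 1024 (nice 0) runs on a queue whose total load
 * lw->weight is 3072. Then inv_weight is roughly 2^32 / 3072, and
 *
 *	delta *= 1024 / 3072
 *
 * i.e. a 3 ms slice of wall-clock execution counts as ~1 ms of weighted
 * (virtual) time: tmp = 3000000 * 1024, and SRR(tmp * inv_weight, 32) comes
 * out at roughly 1000000. SRR() itself just divides by 2^y with rounding,
 * e.g. SRR(7, 2) = (7 + 2) >> 2 = 2 rather than the truncated 1.
 */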
  1086. static inline void update_load_add(struct load_weight *lw, unsigned long inc)
  1087. {
  1088. lw->weight += inc;
  1089. lw->inv_weight = 0;
  1090. }
  1091. static inline void update_load_sub(struct load_weight *lw, unsigned long dec)
  1092. {
  1093. lw->weight -= dec;
  1094. lw->inv_weight = 0;
  1095. }
  1096. /*
  1097. * To aid in avoiding the subversion of "niceness" due to uneven distribution
  1098. * of tasks with abnormal "nice" values across CPUs the contribution that
  1099. * each task makes to its run queue's load is weighted according to its
  1100. * scheduling class and "nice" value. For SCHED_NORMAL tasks this is just a
  1101. * scaled version of the new time slice allocation that they receive on time
  1102. * slice expiry etc.
  1103. */
  1104. #define WEIGHT_IDLEPRIO 3
  1105. #define WMULT_IDLEPRIO 1431655765
  1106. /*
  1107. * Nice levels are multiplicative, with a gentle 10% change for every
  1108. * nice level changed. I.e. when a CPU-bound task goes from nice 0 to
  1109. * nice 1, it will get ~10% less CPU time than another CPU-bound task
  1110. * that remained on nice 0.
  1111. *
  1112. * The "10% effect" is relative and cumulative: from _any_ nice level,
  1113. * if you go up 1 level, it's -10% CPU usage, if you go down 1 level
1114. * it's +10% CPU usage. (To achieve that we use a multiplier of 1.25:
1115. * if a task goes up by ~10% and another task goes down by ~10% then
  1116. * the relative distance between them is ~25%.)
  1117. */
  1118. static const int prio_to_weight[40] = {
  1119. /* -20 */ 88761, 71755, 56483, 46273, 36291,
  1120. /* -15 */ 29154, 23254, 18705, 14949, 11916,
  1121. /* -10 */ 9548, 7620, 6100, 4904, 3906,
  1122. /* -5 */ 3121, 2501, 1991, 1586, 1277,
  1123. /* 0 */ 1024, 820, 655, 526, 423,
  1124. /* 5 */ 335, 272, 215, 172, 137,
  1125. /* 10 */ 110, 87, 70, 56, 45,
  1126. /* 15 */ 36, 29, 23, 18, 15,
  1127. };
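/*
 * Worked example of the "10% effect" (illustrative): two CPU-bound tasks A
 * (nice 0, weight 1024) and B (nice 1, weight 820) sharing one CPU split it
 * in proportion to their weights: A gets 1024/1844 ~= 55.5%, B gets
 * 820/1844 ~= 44.5%, a relative gap of roughly 25% as described above. Each
 * nice step scales the weight by ~1/1.25, e.g. 1024/820 ~= 1.25 and
 * 820/655 ~= 1.25.
 */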
  1128. /*
  1129. * Inverse (2^32/x) values of the prio_to_weight[] array, precalculated.
  1130. *
  1131. * In cases where the weight does not change often, we can use the
  1132. * precalculated inverse to speed up arithmetics by turning divisions
  1133. * into multiplications:
  1134. */
  1135. static const u32 prio_to_wmult[40] = {
  1136. /* -20 */ 48388, 59856, 76040, 92818, 118348,
  1137. /* -15 */ 147320, 184698, 229616, 287308, 360437,
  1138. /* -10 */ 449829, 563644, 704093, 875809, 1099582,
  1139. /* -5 */ 1376151, 1717300, 2157191, 2708050, 3363326,
  1140. /* 0 */ 4194304, 5237765, 6557202, 8165337, 10153587,
  1141. /* 5 */ 12820798, 15790321, 19976592, 24970740, 31350126,
  1142. /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
  1143. /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
  1144. };
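/*
 * For illustration: each entry is ~2^32 divided by the corresponding
 * prio_to_weight[] entry, e.g. for nice 0: 2^32 / 1024 = 4194304, which is
 * exactly the nice-0 value above. calc_delta_mine() can then compute
 * delta * weight / lw as (delta * weight * inv_weight) >> 32, replacing a
 * division with a multiplication and a shift.
 */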
  1145. /* Time spent by the tasks of the cpu accounting group executing in ... */
  1146. enum cpuacct_stat_index {
  1147. CPUACCT_STAT_USER, /* ... user mode */
  1148. CPUACCT_STAT_SYSTEM, /* ... kernel mode */
  1149. CPUACCT_STAT_NSTATS,
  1150. };
  1151. #ifdef CONFIG_CGROUP_CPUACCT
  1152. static void cpuacct_charge(struct task_struct *tsk, u64 cputime);
  1153. static void cpuacct_update_stats(struct task_struct *tsk,
  1154. enum cpuacct_stat_index idx, cputime_t val);
  1155. #else
  1156. static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
  1157. static inline void cpuacct_update_stats(struct task_struct *tsk,
  1158. enum cpuacct_stat_index idx, cputime_t val) {}
  1159. #endif
  1160. static inline void inc_cpu_load(struct rq *rq, unsigned long load)
  1161. {
  1162. update_load_add(&rq->load, load);
  1163. }
  1164. static inline void dec_cpu_load(struct rq *rq, unsigned long load)
  1165. {
  1166. update_load_sub(&rq->load, load);
  1167. }
  1168. #if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
  1169. typedef int (*tg_visitor)(struct task_group *, void *);
  1170. /*
  1171. * Iterate the full tree, calling @down when first entering a node and @up when
  1172. * leaving it for the final time.
  1173. */
  1174. static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
  1175. {
  1176. struct task_group *parent, *child;
  1177. int ret;
  1178. rcu_read_lock();
  1179. parent = &root_task_group;
  1180. down:
  1181. ret = (*down)(parent, data);
  1182. if (ret)
  1183. goto out_unlock;
  1184. list_for_each_entry_rcu(child, &parent->children, siblings) {
  1185. parent = child;
  1186. goto down;
  1187. up:
  1188. continue;
  1189. }
  1190. ret = (*up)(parent, data);
  1191. if (ret)
  1192. goto out_unlock;
  1193. child = parent;
  1194. parent = parent->parent;
  1195. if (parent)
  1196. goto up;
  1197. out_unlock:
  1198. rcu_read_unlock();
  1199. return ret;
  1200. }
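/*
 * The goto-based loop above is an iterative pre/post-order walk of the task
 * group tree. Loosely, and ignoring the early-exit and RCU details, it is
 * equivalent to this recursive sketch (illustration only, not part of the
 * file):
 *
 *	static int walk_tg_tree_rec(struct task_group *tg,
 *				    tg_visitor down, tg_visitor up, void *data)
 *	{
 *		struct task_group *child;
 *		int ret = (*down)(tg, data);		/+ pre-order visit +/
 *		if (ret)
 *			return ret;
 *		list_for_each_entry_rcu(child, &tg->children, siblings) {
 *			ret = walk_tg_tree_rec(child, down, up, data);
 *			if (ret)
 *				return ret;
 *		}
 *		return (*up)(tg, data);			/+ post-order visit +/
 *	}
 *
 * The iterative form avoids kernel-stack recursion for deep hierarchies.
 */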
  1201. static int tg_nop(struct task_group *tg, void *data)
  1202. {
  1203. return 0;
  1204. }
  1205. #endif
  1206. #ifdef CONFIG_SMP
  1207. /* Used instead of source_load when we know the type == 0 */
  1208. static unsigned long weighted_cpuload(const int cpu)
  1209. {
  1210. return cpu_rq(cpu)->load.weight;
  1211. }
  1212. /*
  1213. * Return a low guess at the load of a migration-source cpu weighted
  1214. * according to the scheduling class and "nice" value.
  1215. *
  1216. * We want to under-estimate the load of migration sources, to
  1217. * balance conservatively.
  1218. */
  1219. static unsigned long source_load(int cpu, int type)
  1220. {
  1221. struct rq *rq = cpu_rq(cpu);
  1222. unsigned long total = weighted_cpuload(cpu);
  1223. if (type == 0 || !sched_feat(LB_BIAS))
  1224. return total;
  1225. return min(rq->cpu_load[type-1], total);
  1226. }
  1227. /*
  1228. * Return a high guess at the load of a migration-target cpu weighted
  1229. * according to the scheduling class and "nice" value.
  1230. */
  1231. static unsigned long target_load(int cpu, int type)
  1232. {
  1233. struct rq *rq = cpu_rq(cpu);
  1234. unsigned long total = weighted_cpuload(cpu);
  1235. if (type == 0 || !sched_feat(LB_BIAS))
  1236. return total;
  1237. return max(rq->cpu_load[type-1], total);
  1238. }
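/*
 * Example (illustrative): rq->cpu_load[] holds progressively slower-moving
 * averages of the runqueue load. If a CPU's instantaneous weighted load is
 * 2048 but its cpu_load[type-1] average is 1024, source_load() reports the
 * conservative min() of 1024 (don't over-eagerly pull from it) while
 * target_load() reports the pessimistic max() of 2048 (don't over-eagerly
 * push work to it). With LB_BIAS disabled both simply report 2048.
 */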
  1239. static struct sched_group *group_of(int cpu)
  1240. {
  1241. struct sched_domain *sd = rcu_dereference(cpu_rq(cpu)->sd);
  1242. if (!sd)
  1243. return NULL;
  1244. return sd->groups;
  1245. }
  1246. static unsigned long power_of(int cpu)
  1247. {
  1248. struct sched_group *group = group_of(cpu);
  1249. if (!group)
  1250. return SCHED_LOAD_SCALE;
  1251. return group->cpu_power;
  1252. }
  1253. static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
  1254. static unsigned long cpu_avg_load_per_task(int cpu)
  1255. {
  1256. struct rq *rq = cpu_rq(cpu);
  1257. unsigned long nr_running = ACCESS_ONCE(rq->nr_running);
  1258. if (nr_running)
  1259. rq->avg_load_per_task = rq->load.weight / nr_running;
  1260. else
  1261. rq->avg_load_per_task = 0;
  1262. return rq->avg_load_per_task;
  1263. }
  1264. #ifdef CONFIG_FAIR_GROUP_SCHED
  1265. static __read_mostly unsigned long *update_shares_data;
  1266. static void __set_se_shares(struct sched_entity *se, unsigned long shares);
  1267. /*
  1268. * Calculate and set the cpu's group shares.
  1269. */
  1270. static void update_group_shares_cpu(struct task_group *tg, int cpu,
  1271. unsigned long sd_shares,
  1272. unsigned long sd_rq_weight,
  1273. unsigned long *usd_rq_weight)
  1274. {
  1275. unsigned long shares, rq_weight;
  1276. int boost = 0;
  1277. rq_weight = usd_rq_weight[cpu];
  1278. if (!rq_weight) {
  1279. boost = 1;
  1280. rq_weight = NICE_0_LOAD;
  1281. }
  1282. /*
1283. *
1284. * shares_i = (\Sum_j shares_j * rq_weight_i) / \Sum_j rq_weight_j
1285. *
  1286. */
  1287. shares = (sd_shares * rq_weight) / sd_rq_weight;
  1288. shares = clamp_t(unsigned long, shares, MIN_SHARES, MAX_SHARES);
  1289. if (abs(shares - tg->se[cpu]->load.weight) >
  1290. sysctl_sched_shares_thresh) {
  1291. struct rq *rq = cpu_rq(cpu);
  1292. unsigned long flags;
  1293. raw_spin_lock_irqsave(&rq->lock, flags);
  1294. tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
  1295. tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
  1296. __set_se_shares(tg->se[cpu], shares);
  1297. raw_spin_unlock_irqrestore(&rq->lock, flags);
  1298. }
  1299. }
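/*
 * Worked example (illustrative numbers): a group with tg->shares = 1024
 * spans two CPUs in the domain; its cfs_rq weights are 3072 on CPU0 and
 * 1024 on CPU1, so sd_rq_weight = 4096. The formula above yields
 *
 *	shares_0 = 1024 * 3072 / 4096 = 768
 *	shares_1 = 1024 * 1024 / 4096 = 256
 *
 * i.e. the group's total weight is distributed across CPUs in proportion
 * to where its runnable load actually is, and the per-cpu value is only
 * written back when it moved by more than sysctl_sched_shares_thresh.
 */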
  1300. /*
1301. * Re-compute the task group's per-cpu shares over the given domain.
  1302. * This needs to be done in a bottom-up fashion because the rq weight of a
  1303. * parent group depends on the shares of its child groups.
  1304. */
  1305. static int tg_shares_up(struct task_group *tg, void *data)
  1306. {
  1307. unsigned long weight, rq_weight = 0, sum_weight = 0, shares = 0;
  1308. unsigned long *usd_rq_weight;
  1309. struct sched_domain *sd = data;
  1310. unsigned long flags;
  1311. int i;
  1312. if (!tg->se[0])
  1313. return 0;
  1314. local_irq_save(flags);
  1315. usd_rq_weight = per_cpu_ptr(update_shares_data, smp_processor_id());
  1316. for_each_cpu(i, sched_domain_span(sd)) {
  1317. weight = tg->cfs_rq[i]->load.weight;
  1318. usd_rq_weight[i] = weight;
  1319. rq_weight += weight;
  1320. /*
  1321. * If there are currently no tasks on the cpu pretend there
  1322. * is one of average load so that when a new task gets to
  1323. * run here it will not get delayed by group starvation.
  1324. */
  1325. if (!weight)
  1326. weight = NICE_0_LOAD;
  1327. sum_weight += weight;
  1328. shares += tg->cfs_rq[i]->shares;
  1329. }
  1330. if (!rq_weight)
  1331. rq_weight = sum_weight;
  1332. if ((!shares && rq_weight) || shares > tg->shares)
  1333. shares = tg->shares;
  1334. if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
  1335. shares = tg->shares;
  1336. for_each_cpu(i, sched_domain_span(sd))
  1337. update_group_shares_cpu(tg, i, shares, rq_weight, usd_rq_weight);
  1338. local_irq_restore(flags);
  1339. return 0;
  1340. }
  1341. /*
  1342. * Compute the cpu's hierarchical load factor for each task group.
  1343. * This needs to be done in a top-down fashion because the load of a child
1344. * group is a fraction of its parent's load.
  1345. */
  1346. static int tg_load_down(struct task_group *tg, void *data)
  1347. {
  1348. unsigned long load;
  1349. long cpu = (long)data;
  1350. if (!tg->parent) {
  1351. load = cpu_rq(cpu)->load.weight;
  1352. } else {
  1353. load = tg->parent->cfs_rq[cpu]->h_load;
  1354. load *= tg->cfs_rq[cpu]->shares;
  1355. load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
  1356. }
  1357. tg->cfs_rq[cpu]->h_load = load;
  1358. return 0;
  1359. }
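/*
 * Worked example (illustrative numbers): suppose the parent group's cfs_rq
 * on this CPU has h_load = 2048 and load.weight = 2048, and the child group
 * contributes shares = 512 to that queue. Then the child's
 *
 *	h_load = 2048 * 512 / (2048 + 1) ~= 511
 *
 * i.e. the child is charged the fraction of the parent's hierarchical load
 * that matches its share of the parent's queue; the "+ 1" only guards
 * against dividing by zero when the parent queue is empty. For the root
 * group h_load is simply the rq's load.weight.
 */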
  1360. static void update_shares(struct sched_domain *sd)
  1361. {
  1362. s64 elapsed;
  1363. u64 now;
  1364. if (root_task_group_empty())
  1365. return;
  1366. now = cpu_clock(raw_smp_processor_id());
  1367. elapsed = now - sd->last_update;
  1368. if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
  1369. sd->last_update = now;
  1370. walk_tg_tree(tg_nop, tg_shares_up, sd);
  1371. }
  1372. }
  1373. static void update_h_load(long cpu)
  1374. {
  1375. if (root_task_group_empty())
  1376. return;
  1377. walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
  1378. }
  1379. #else
  1380. static inline void update_shares(struct sched_domain *sd)
  1381. {
  1382. }
  1383. #endif
  1384. #ifdef CONFIG_PREEMPT
  1385. static void double_rq_lock(struct rq *rq1, struct rq *rq2);
  1386. /*
  1387. * fair double_lock_balance: Safely acquires both rq->locks in a fair
  1388. * way at the expense of forcing extra atomic operations in all
  1389. * invocations. This assures that the double_lock is acquired using the
  1390. * same underlying policy as the spinlock_t on this architecture, which
  1391. * reduces latency compared to the unfair variant below. However, it
  1392. * also adds more overhead and therefore may reduce throughput.
  1393. */
  1394. static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
  1395. __releases(this_rq->lock)
  1396. __acquires(busiest->lock)
  1397. __acquires(this_rq->lock)
  1398. {
  1399. raw_spin_unlock(&this_rq->lock);
  1400. double_rq_lock(this_rq, busiest);
  1401. return 1;
  1402. }
  1403. #else
  1404. /*
  1405. * Unfair double_lock_balance: Optimizes throughput at the expense of
  1406. * latency by eliminating extra atomic operations when the locks are
  1407. * already in proper order on entry. This favors lower cpu-ids and will
  1408. * grant the double lock to lower cpus over higher ids under contention,
  1409. * regardless of entry order into the function.
  1410. */
  1411. static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
  1412. __releases(this_rq->lock)
  1413. __acquires(busiest->lock)
  1414. __acquires(this_rq->lock)
  1415. {
  1416. int ret = 0;
  1417. if (unlikely(!raw_spin_trylock(&busiest->lock))) {
  1418. if (busiest < this_rq) {
  1419. raw_spin_unlock(&this_rq->lock);
  1420. raw_spin_lock(&busiest->lock);
  1421. raw_spin_lock_nested(&this_rq->lock,
  1422. SINGLE_DEPTH_NESTING);
  1423. ret = 1;
  1424. } else
  1425. raw_spin_lock_nested(&busiest->lock,
  1426. SINGLE_DEPTH_NESTING);
  1427. }
  1428. return ret;
  1429. }
  1430. #endif /* CONFIG_PREEMPT */
  1431. /*
  1432. * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
  1433. */
  1434. static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
  1435. {
  1436. if (unlikely(!irqs_disabled())) {
1437. /* printk() doesn't work well under rq->lock */
  1438. raw_spin_unlock(&this_rq->lock);
  1439. BUG_ON(1);
  1440. }
  1441. return _double_lock_balance(this_rq, busiest);
  1442. }
  1443. static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
  1444. __releases(busiest->lock)
  1445. {
  1446. raw_spin_unlock(&busiest->lock);
  1447. lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
  1448. }
  1449. /*
  1450. * double_rq_lock - safely lock two runqueues
  1451. *
  1452. * Note this does not disable interrupts like task_rq_lock,
  1453. * you need to do so manually before calling.
  1454. */
  1455. static void double_rq_lock(struct rq *rq1, struct rq *rq2)
  1456. __acquires(rq1->lock)
  1457. __acquires(rq2->lock)
  1458. {
  1459. BUG_ON(!irqs_disabled());
  1460. if (rq1 == rq2) {
  1461. raw_spin_lock(&rq1->lock);
  1462. __acquire(rq2->lock); /* Fake it out ;) */
  1463. } else {
  1464. if (rq1 < rq2) {
  1465. raw_spin_lock(&rq1->lock);
  1466. raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
  1467. } else {
  1468. raw_spin_lock(&rq2->lock);
  1469. raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
  1470. }
  1471. }
  1472. update_rq_clock(rq1);
  1473. update_rq_clock(rq2);
  1474. }
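/*
 * Illustrative scenario for the lock ordering above: if CPU0 runs
 * double_rq_lock(rq0, rq1) while CPU1 runs double_rq_lock(rq1, rq0), both
 * first take the lock of whichever rq has the lower address and only then
 * the other one, so they cannot end up holding one lock each and spinning
 * on the other (the classic AB-BA deadlock). The _nested annotation merely
 * tells lockdep that taking two locks of the same class here is intentional.
 */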
  1475. /*
  1476. * double_rq_unlock - safely unlock two runqueues
  1477. *
  1478. * Note this does not restore interrupts like task_rq_unlock,
  1479. * you need to do so manually after calling.
  1480. */
  1481. static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
  1482. __releases(rq1->lock)
  1483. __releases(rq2->lock)
  1484. {
  1485. raw_spin_unlock(&rq1->lock);
  1486. if (rq1 != rq2)
  1487. raw_spin_unlock(&rq2->lock);
  1488. else
  1489. __release(rq2->lock);
  1490. }
  1491. #endif
  1492. #ifdef CONFIG_FAIR_GROUP_SCHED
  1493. static void cfs_rq_set_shares(struct cfs_rq *cfs_rq, unsigned long shares)
  1494. {
  1495. #ifdef CONFIG_SMP
  1496. cfs_rq->shares = shares;
  1497. #endif
  1498. }
  1499. #endif
  1500. static void calc_load_account_active(struct rq *this_rq);
  1501. static void update_sysctl(void);
  1502. static int get_update_sysctl_factor(void);
  1503. static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
  1504. {
  1505. set_task_rq(p, cpu);
  1506. #ifdef CONFIG_SMP
  1507. /*
  1508. * After ->cpu is set up to a new value, task_rq_lock(p, ...) can be
1509. * successfully executed on another CPU. We must ensure that updates of
  1510. * per-task data have been completed by this moment.
  1511. */
  1512. smp_wmb();
  1513. task_thread_info(p)->cpu = cpu;
  1514. #endif
  1515. }
  1516. static const struct sched_class rt_sched_class;
  1517. #define sched_class_highest (&rt_sched_class)
  1518. #define for_each_class(class) \
  1519. for (class = sched_class_highest; class; class = class->next)
  1520. #include "sched_stats.h"
  1521. static void inc_nr_running(struct rq *rq)
  1522. {
  1523. rq->nr_running++;
  1524. }
  1525. static void dec_nr_running(struct rq *rq)
  1526. {
  1527. rq->nr_running--;
  1528. }
  1529. static void set_load_weight(struct task_struct *p)
  1530. {
  1531. if (task_has_rt_policy(p)) {
  1532. p->se.load.weight = prio_to_weight[0] * 2;
  1533. p->se.load.inv_weight = prio_to_wmult[0] >> 1;
  1534. return;
  1535. }
  1536. /*
  1537. * SCHED_IDLE tasks get minimal weight:
  1538. */
  1539. if (p->policy == SCHED_IDLE) {
  1540. p->se.load.weight = WEIGHT_IDLEPRIO;
  1541. p->se.load.inv_weight = WMULT_IDLEPRIO;
  1542. return;
  1543. }
  1544. p->se.load.weight = prio_to_weight[p->static_prio - MAX_RT_PRIO];
  1545. p->se.load.inv_weight = prio_to_wmult[p->static_prio - MAX_RT_PRIO];
  1546. }
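/*
 * For illustration: a nice-0 SCHED_NORMAL task has static_prio 120, so it
 * indexes prio_to_weight[120 - MAX_RT_PRIO] = prio_to_weight[20] = 1024;
 * a nice -5 task gets 3121 and a nice +5 task gets 335. RT tasks are simply
 * given twice the nice -20 weight (2 * 88761) so they dominate any load
 * calculation they appear in.
 */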
  1547. static void update_avg(u64 *avg, u64 sample)
  1548. {
  1549. s64 diff = sample - *avg;
  1550. *avg += diff >> 3;
  1551. }
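/*
 * update_avg() is a cheap exponentially weighted moving average with a 1/8
 * weight for each new sample: avg = avg + (sample - avg)/8. Illustrative
 * numbers: with avg = 800 and a new sample of 1600, diff = 800 and the new
 * avg becomes 800 + 800/8 = 900, i.e. roughly 12.5% of the distance to the
 * new sample is covered per update.
 */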
  1552. static void
  1553. enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
  1554. {
  1555. if (wakeup)
  1556. p->se.start_runtime = p->se.sum_exec_runtime;
  1557. sched_info_queued(p);
  1558. p->sched_class->enqueue_task(rq, p, wakeup, head);
  1559. p->se.on_rq = 1;
  1560. }
  1561. static void dequeue_task(struct rq *rq, struct task_struct *p, int sleep)
  1562. {
  1563. if (sleep) {
  1564. if (p->se.last_wakeup) {
  1565. update_avg(&p->se.avg_overlap,
  1566. p->se.sum_exec_runtime - p->se.last_wakeup);
  1567. p->se.last_wakeup = 0;
  1568. } else {
  1569. update_avg(&p->se.avg_wakeup,
  1570. sysctl_sched_wakeup_granularity);
  1571. }
  1572. }
  1573. sched_info_dequeued(p);
  1574. p->sched_class->dequeue_task(rq, p, sleep);
  1575. p->se.on_rq = 0;
  1576. }
  1577. /*
  1578. * activate_task - move a task to the runqueue.
  1579. */
  1580. static void activate_task(struct rq *rq, struct task_struct *p, int wakeup)
  1581. {
  1582. if (task_contributes_to_load(p))
  1583. rq->nr_uninterruptible--;
  1584. enqueue_task(rq, p, wakeup, false);
  1585. inc_nr_running(rq);
  1586. }
  1587. /*
  1588. * deactivate_task - remove a task from the runqueue.
  1589. */
  1590. static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep)
  1591. {
  1592. if (task_contributes_to_load(p))
  1593. rq->nr_uninterruptible++;
  1594. dequeue_task(rq, p, sleep);
  1595. dec_nr_running(rq);
  1596. }
  1597. #include "sched_idletask.c"
  1598. #include "sched_fair.c"
  1599. #include "sched_rt.c"
  1600. #ifdef CONFIG_SCHED_DEBUG
  1601. # include "sched_debug.c"
  1602. #endif
  1603. /*
  1604. * __normal_prio - return the priority that is based on the static prio
  1605. */
  1606. static inline int __normal_prio(struct task_struct *p)
  1607. {
  1608. return p->static_prio;
  1609. }
  1610. /*
  1611. * Calculate the expected normal priority: i.e. priority
  1612. * without taking RT-inheritance into account. Might be
  1613. * boosted by interactivity modifiers. Changes upon fork,
  1614. * setprio syscalls, and whenever the interactivity
  1615. * estimator recalculates.
  1616. */
  1617. static inline int normal_prio(struct task_struct *p)
  1618. {
  1619. int prio;
  1620. if (task_has_rt_policy(p))
  1621. prio = MAX_RT_PRIO-1 - p->rt_priority;
  1622. else
  1623. prio = __normal_prio(p);
  1624. return prio;
  1625. }
  1626. /*
  1627. * Calculate the current priority, i.e. the priority
  1628. * taken into account by the scheduler. This value might
  1629. * be boosted by RT tasks, or might be boosted by
  1630. * interactivity modifiers. Will be RT if the task got
  1631. * RT-boosted. If not then it returns p->normal_prio.
  1632. */
  1633. static int effective_prio(struct task_struct *p)
  1634. {
  1635. p->normal_prio = normal_prio(p);
  1636. /*
  1637. * If we are RT tasks or we were boosted to RT priority,
  1638. * keep the priority unchanged. Otherwise, update priority
  1639. * to the normal priority:
  1640. */
  1641. if (!rt_prio(p->prio))
  1642. return p->normal_prio;
  1643. return p->prio;
  1644. }
  1645. /**
  1646. * task_curr - is this task currently executing on a CPU?
  1647. * @p: the task in question.
  1648. */
  1649. inline int task_curr(const struct task_struct *p)
  1650. {
  1651. return cpu_curr(task_cpu(p)) == p;
  1652. }
  1653. static inline void check_class_changed(struct rq *rq, struct task_struct *p,
  1654. const struct sched_class *prev_class,
  1655. int oldprio, int running)
  1656. {
  1657. if (prev_class != p->sched_class) {
  1658. if (prev_class->switched_from)
  1659. prev_class->switched_from(rq, p, running);
  1660. p->sched_class->switched_to(rq, p, running);
  1661. } else
  1662. p->sched_class->prio_changed(rq, p, oldprio, running);
  1663. }
  1664. #ifdef CONFIG_SMP
  1665. /*
  1666. * Is this task likely cache-hot:
  1667. */
  1668. static int
  1669. task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
  1670. {
  1671. s64 delta;
  1672. if (p->sched_class != &fair_sched_class)
  1673. return 0;
  1674. /*
  1675. * Buddy candidates are cache hot:
  1676. */
  1677. if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
  1678. (&p->se == cfs_rq_of(&p->se)->next ||
  1679. &p->se == cfs_rq_of(&p->se)->last))
  1680. return 1;
  1681. if (sysctl_sched_migration_cost == -1)
  1682. return 1;
  1683. if (sysctl_sched_migration_cost == 0)
  1684. return 0;
  1685. delta = now - p->se.exec_start;
  1686. return delta < (s64)sysctl_sched_migration_cost;
  1687. }
  1688. void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
  1689. {
  1690. #ifdef CONFIG_SCHED_DEBUG
  1691. /*
  1692. * We should never call set_task_cpu() on a blocked task,
  1693. * ttwu() will sort out the placement.
  1694. */
  1695. WARN_ON_ONCE(p->state != TASK_RUNNING && p->state != TASK_WAKING &&
  1696. !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
  1697. #endif
  1698. trace_sched_migrate_task(p, new_cpu);
  1699. if (task_cpu(p) != new_cpu) {
  1700. p->se.nr_migrations++;
  1701. perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
  1702. }
  1703. __set_task_cpu(p, new_cpu);
  1704. }
  1705. struct migration_req {
  1706. struct list_head list;
  1707. struct task_struct *task;
  1708. int dest_cpu;
  1709. struct completion done;
  1710. };
  1711. /*
  1712. * The task's runqueue lock must be held.
  1713. * Returns true if you have to wait for migration thread.
  1714. */
  1715. static int
  1716. migrate_task(struct task_struct *p, int dest_cpu, struct migration_req *req)
  1717. {
  1718. struct rq *rq = task_rq(p);
  1719. /*
  1720. * If the task is not on a runqueue (and not running), then
  1721. * the next wake-up will properly place the task.
  1722. */
  1723. if (!p->se.on_rq && !task_running(rq, p))
  1724. return 0;
  1725. init_completion(&req->done);
  1726. req->task = p;
  1727. req->dest_cpu = dest_cpu;
  1728. list_add(&req->list, &rq->migration_queue);
  1729. return 1;
  1730. }
  1731. /*
  1732. * wait_task_context_switch - wait for a thread to complete at least one
  1733. * context switch.
  1734. *
  1735. * @p must not be current.
  1736. */
  1737. void wait_task_context_switch(struct task_struct *p)
  1738. {
  1739. unsigned long nvcsw, nivcsw, flags;
  1740. int running;
  1741. struct rq *rq;
  1742. nvcsw = p->nvcsw;
  1743. nivcsw = p->nivcsw;
  1744. for (;;) {
  1745. /*
  1746. * The runqueue is assigned before the actual context
  1747. * switch. We need to take the runqueue lock.
  1748. *
  1749. * We could check initially without the lock but it is
  1750. * very likely that we need to take the lock in every
  1751. * iteration.
  1752. */
  1753. rq = task_rq_lock(p, &flags);
  1754. running = task_running(rq, p);
  1755. task_rq_unlock(rq, &flags);
  1756. if (likely(!running))
  1757. break;
  1758. /*
  1759. * The switch count is incremented before the actual
  1760. * context switch. We thus wait for two switches to be
  1761. * sure at least one completed.
  1762. */
  1763. if ((p->nvcsw - nvcsw) > 1)
  1764. break;
  1765. if ((p->nivcsw - nivcsw) > 1)
  1766. break;
  1767. cpu_relax();
  1768. }
  1769. }
  1770. /*
  1771. * wait_task_inactive - wait for a thread to unschedule.
  1772. *
  1773. * If @match_state is nonzero, it's the @p->state value just checked and
  1774. * not expected to change. If it changes, i.e. @p might have woken up,
  1775. * then return zero. When we succeed in waiting for @p to be off its CPU,
  1776. * we return a positive number (its total switch count). If a second call
  1777. * a short while later returns the same number, the caller can be sure that
  1778. * @p has remained unscheduled the whole time.
  1779. *
  1780. * The caller must ensure that the task *will* unschedule sometime soon,
  1781. * else this function might spin for a *long* time. This function can't
  1782. * be called with interrupts off, or it may introduce deadlock with
  1783. * smp_call_function() if an IPI is sent by the same process we are
  1784. * waiting to become inactive.
  1785. */
  1786. unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  1787. {
  1788. unsigned long flags;
  1789. int running, on_rq;
  1790. unsigned long ncsw;
  1791. struct rq *rq;
  1792. for (;;) {
  1793. /*
  1794. * We do the initial early heuristics without holding
  1795. * any task-queue locks at all. We'll only try to get
  1796. * the runqueue lock when things look like they will
  1797. * work out!
  1798. */
  1799. rq = task_rq(p);
  1800. /*
  1801. * If the task is actively running on another CPU
  1802. * still, just relax and busy-wait without holding
  1803. * any locks.
  1804. *
  1805. * NOTE! Since we don't hold any locks, it's not
  1806. * even sure that "rq" stays as the right runqueue!
  1807. * But we don't care, since "task_running()" will
  1808. * return false if the runqueue has changed and p
  1809. * is actually now running somewhere else!
  1810. */
  1811. while (task_running(rq, p)) {
  1812. if (match_state && unlikely(p->state != match_state))
  1813. return 0;
  1814. cpu_relax();
  1815. }
  1816. /*
  1817. * Ok, time to look more closely! We need the rq
  1818. * lock now, to be *sure*. If we're wrong, we'll
  1819. * just go back and repeat.
  1820. */
  1821. rq = task_rq_lock(p, &flags);
  1822. trace_sched_wait_task(rq, p);
  1823. running = task_running(rq, p);
  1824. on_rq = p->se.on_rq;
  1825. ncsw = 0;
  1826. if (!match_state || p->state == match_state)
  1827. ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
  1828. task_rq_unlock(rq, &flags);
  1829. /*
  1830. * If it changed from the expected state, bail out now.
  1831. */
  1832. if (unlikely(!ncsw))
  1833. break;
  1834. /*
  1835. * Was it really running after all now that we
  1836. * checked with the proper locks actually held?
  1837. *
  1838. * Oops. Go back and try again..
  1839. */
  1840. if (unlikely(running)) {
  1841. cpu_relax();
  1842. continue;
  1843. }
  1844. /*
  1845. * It's not enough that it's not actively running,
  1846. * it must be off the runqueue _entirely_, and not
  1847. * preempted!
  1848. *
  1849. * So if it was still runnable (but just not actively
  1850. * running right now), it's preempted, and we should
  1851. * yield - it could be a while.
  1852. */
  1853. if (unlikely(on_rq)) {
  1854. schedule_timeout_uninterruptible(1);
  1855. continue;
  1856. }
  1857. /*
  1858. * Ahh, all good. It wasn't running, and it wasn't
  1859. * runnable, which means that it will never become
  1860. * running in the future either. We're all done!
  1861. */
  1862. break;
  1863. }
  1864. return ncsw;
  1865. }
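/*
 * Usage sketch for the switch-count contract described above (hypothetical
 * caller code, not taken from this file):
 *
 *	unsigned long ncsw = wait_task_inactive(p, TASK_TRACED);
 *	if (!ncsw)
 *		return -ESRCH;		/+ state changed / task woke up +/
 *	... inspect the (off-CPU) task ...
 *	if (wait_task_inactive(p, TASK_TRACED) != ncsw)
 *		return -EAGAIN;		/+ it ran in between, retry +/
 *
 * The returned value is nvcsw with the MSB forced on, so a successful wait
 * can never be confused with the 0 returned on a state mismatch.
 */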
  1866. /***
  1867. * kick_process - kick a running thread to enter/exit the kernel
  1868. * @p: the to-be-kicked thread
  1869. *
  1870. * Cause a process which is running on another CPU to enter
  1871. * kernel-mode, without any delay. (to get signals handled.)
  1872. *
1873. * NOTE: this function doesn't have to take the runqueue lock,
  1874. * because all it wants to ensure is that the remote task enters
  1875. * the kernel. If the IPI races and the task has been migrated
  1876. * to another CPU then no harm is done and the purpose has been
  1877. * achieved as well.
  1878. */
  1879. void kick_process(struct task_struct *p)
  1880. {
  1881. int cpu;
  1882. preempt_disable();
  1883. cpu = task_cpu(p);
  1884. if ((cpu != smp_processor_id()) && task_curr(p))
  1885. smp_send_reschedule(cpu);
  1886. preempt_enable();
  1887. }
  1888. EXPORT_SYMBOL_GPL(kick_process);
  1889. #endif /* CONFIG_SMP */
  1890. /**
  1891. * task_oncpu_function_call - call a function on the cpu on which a task runs
  1892. * @p: the task to evaluate
  1893. * @func: the function to be called
  1894. * @info: the function call argument
  1895. *
  1896. * Calls the function @func when the task is currently running. This might
1897. * be on the current CPU, which just calls the function directly.
  1898. */
  1899. void task_oncpu_function_call(struct task_struct *p,
  1900. void (*func) (void *info), void *info)
  1901. {
  1902. int cpu;
  1903. preempt_disable();
  1904. cpu = task_cpu(p);
  1905. if (task_curr(p))
  1906. smp_call_function_single(cpu, func, info, 1);
  1907. preempt_enable();
  1908. }
  1909. #ifdef CONFIG_SMP
  1910. static int select_fallback_rq(int cpu, struct task_struct *p)
  1911. {
  1912. int dest_cpu;
  1913. const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(cpu));
  1914. /* Look for allowed, online CPU in same node. */
  1915. for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask)
  1916. if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
  1917. return dest_cpu;
  1918. /* Any allowed, online CPU? */
  1919. dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask);
  1920. if (dest_cpu < nr_cpu_ids)
  1921. return dest_cpu;
  1922. /* No more Mr. Nice Guy. */
  1923. if (dest_cpu >= nr_cpu_ids) {
  1924. rcu_read_lock();
  1925. cpuset_cpus_allowed_locked(p, &p->cpus_allowed);
  1926. rcu_read_unlock();
  1927. dest_cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
  1928. /*
  1929. * Don't tell them about moving exiting tasks or
  1930. * kernel threads (both mm NULL), since they never
1931. * leave the kernel.
  1932. */
  1933. if (p->mm && printk_ratelimit()) {
  1934. printk(KERN_INFO "process %d (%s) no "
  1935. "longer affine to cpu%d\n",
  1936. task_pid_nr(p), p->comm, cpu);
  1937. }
  1938. }
  1939. return dest_cpu;
  1940. }
  1941. /*
1942. * Gets called from 3 sites (exec, fork, wakeup). Since it is called without
1943. * holding rq->lock we need to ensure ->cpus_allowed is stable; this is done
  1944. * by:
  1945. *
  1946. * exec: is unstable, retry loop
  1947. * fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
  1948. */
  1949. static inline
  1950. int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
  1951. {
  1952. int cpu = p->sched_class->select_task_rq(p, sd_flags, wake_flags);
  1953. /*
  1954. * In order not to call set_task_cpu() on a blocking task we need
  1955. * to rely on ttwu() to place the task on a valid ->cpus_allowed
  1956. * cpu.
  1957. *
  1958. * Since this is common to all placement strategies, this lives here.
  1959. *
  1960. * [ this allows ->select_task() to simply return task_cpu(p) and
  1961. * not worry about this generic constraint ]
  1962. */
  1963. if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
  1964. !cpu_online(cpu)))
  1965. cpu = select_fallback_rq(task_cpu(p), p);
  1966. return cpu;
  1967. }
  1968. #endif
  1969. /***
  1970. * try_to_wake_up - wake up a thread
  1971. * @p: the to-be-woken-up thread
  1972. * @state: the mask of task states that can be woken
  1973. * @sync: do a synchronous wakeup?
  1974. *
  1975. * Put it on the run-queue if it's not already there. The "current"
  1976. * thread is always on the run-queue (except when the actual
  1977. * re-schedule is in progress), and as such you're allowed to do
  1978. * the simpler "current->state = TASK_RUNNING" to mark yourself
  1979. * runnable without the overhead of this.
  1980. *
  1981. * returns failure only if the task is already active.
  1982. */
  1983. static int try_to_wake_up(struct task_struct *p, unsigned int state,
  1984. int wake_flags)
  1985. {
  1986. int cpu, orig_cpu, this_cpu, success = 0;
  1987. unsigned long flags;
  1988. struct rq *rq, *orig_rq;
  1989. if (!sched_feat(SYNC_WAKEUPS))
  1990. wake_flags &= ~WF_SYNC;
  1991. this_cpu = get_cpu();
  1992. smp_wmb();
  1993. rq = orig_rq = task_rq_lock(p, &flags);
  1994. update_rq_clock(rq);
  1995. if (!(p->state & state))
  1996. goto out;
  1997. if (p->se.on_rq)
  1998. goto out_running;
  1999. cpu = task_cpu(p);
  2000. orig_cpu = cpu;
  2001. #ifdef CONFIG_SMP
  2002. if (unlikely(task_running(rq, p)))
  2003. goto out_activate;
  2004. /*
  2005. * In order to handle concurrent wakeups and release the rq->lock
  2006. * we put the task in TASK_WAKING state.
  2007. *
  2008. * First fix up the nr_uninterruptible count:
  2009. */
  2010. if (task_contributes_to_load(p))
  2011. rq->nr_uninterruptible--;
  2012. p->state = TASK_WAKING;
  2013. if (p->sched_class->task_waking)
  2014. p->sched_class->task_waking(rq, p);
  2015. __task_rq_unlock(rq);
  2016. cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
  2017. if (cpu != orig_cpu) {
  2018. /*
  2019. * Since we migrate the task without holding any rq->lock,
  2020. * we need to be careful with task_rq_lock(), since that
  2021. * might end up locking an invalid rq.
  2022. */
  2023. set_task_cpu(p, cpu);
  2024. }
  2025. rq = cpu_rq(cpu);
  2026. raw_spin_lock(&rq->lock);
  2027. update_rq_clock(rq);
  2028. /*
  2029. * We migrated the task without holding either rq->lock, however
  2030. * since the task is not on the task list itself, nobody else
  2031. * will try and migrate the task, hence the rq should match the
  2032. * cpu we just moved it to.
  2033. */
  2034. WARN_ON(task_cpu(p) != cpu);
  2035. WARN_ON(p->state != TASK_WAKING);
  2036. #ifdef CONFIG_SCHEDSTATS
  2037. schedstat_inc(rq, ttwu_count);
  2038. if (cpu == this_cpu)
  2039. schedstat_inc(rq, ttwu_local);
  2040. else {
  2041. struct sched_domain *sd;
  2042. for_each_domain(this_cpu, sd) {
  2043. if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
  2044. schedstat_inc(sd, ttwu_wake_remote);
  2045. break;
  2046. }
  2047. }
  2048. }
  2049. #endif /* CONFIG_SCHEDSTATS */
  2050. out_activate:
  2051. #endif /* CONFIG_SMP */
  2052. schedstat_inc(p, se.nr_wakeups);
  2053. if (wake_flags & WF_SYNC)
  2054. schedstat_inc(p, se.nr_wakeups_sync);
  2055. if (orig_cpu != cpu)
  2056. schedstat_inc(p, se.nr_wakeups_migrate);
  2057. if (cpu == this_cpu)
  2058. schedstat_inc(p, se.nr_wakeups_local);
  2059. else
  2060. schedstat_inc(p, se.nr_wakeups_remote);
  2061. activate_task(rq, p, 1);
  2062. success = 1;
  2063. /*
  2064. * Only attribute actual wakeups done by this task.
  2065. */
  2066. if (!in_interrupt()) {
  2067. struct sched_entity *se = &current->se;
  2068. u64 sample = se->sum_exec_runtime;
  2069. if (se->last_wakeup)
  2070. sample -= se->last_wakeup;
  2071. else
  2072. sample -= se->start_runtime;
  2073. update_avg(&se->avg_wakeup, sample);
  2074. se->last_wakeup = se->sum_exec_runtime;
  2075. }
  2076. out_running:
  2077. trace_sched_wakeup(rq, p, success);
  2078. check_preempt_curr(rq, p, wake_flags);
  2079. p->state = TASK_RUNNING;
  2080. #ifdef CONFIG_SMP
  2081. if (p->sched_class->task_woken)
  2082. p->sched_class->task_woken(rq, p);
  2083. if (unlikely(rq->idle_stamp)) {
  2084. u64 delta = rq->clock - rq->idle_stamp;
  2085. u64 max = 2*sysctl_sched_migration_cost;
  2086. if (delta > max)
  2087. rq->avg_idle = max;
  2088. else
  2089. update_avg(&rq->avg_idle, delta);
  2090. rq->idle_stamp = 0;
  2091. }
  2092. #endif
  2093. out:
  2094. task_rq_unlock(rq, &flags);
  2095. put_cpu();
  2096. return success;
  2097. }
  2098. /**
  2099. * wake_up_process - Wake up a specific process
  2100. * @p: The process to be woken up.
  2101. *
  2102. * Attempt to wake up the nominated process and move it to the set of runnable
  2103. * processes. Returns 1 if the process was woken up, 0 if it was already
  2104. * running.
  2105. *
  2106. * It may be assumed that this function implies a write memory barrier before
  2107. * changing the task state if and only if any tasks are woken up.
  2108. */
  2109. int wake_up_process(struct task_struct *p)
  2110. {
  2111. return try_to_wake_up(p, TASK_ALL, 0);
  2112. }
  2113. EXPORT_SYMBOL(wake_up_process);
  2114. int wake_up_state(struct task_struct *p, unsigned int state)
  2115. {
  2116. return try_to_wake_up(p, state, 0);
  2117. }
  2118. /*
  2119. * Perform scheduler related setup for a newly forked process p.
  2120. * p is forked by current.
  2121. *
  2122. * __sched_fork() is basic setup used by init_idle() too:
  2123. */
  2124. static void __sched_fork(struct task_struct *p)
  2125. {
  2126. p->se.exec_start = 0;
  2127. p->se.sum_exec_runtime = 0;
  2128. p->se.prev_sum_exec_runtime = 0;
  2129. p->se.nr_migrations = 0;
  2130. p->se.last_wakeup = 0;
  2131. p->se.avg_overlap = 0;
  2132. p->se.start_runtime = 0;
  2133. p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
  2134. #ifdef CONFIG_SCHEDSTATS
  2135. p->se.wait_start = 0;
  2136. p->se.wait_max = 0;
  2137. p->se.wait_count = 0;
  2138. p->se.wait_sum = 0;
  2139. p->se.sleep_start = 0;
  2140. p->se.sleep_max = 0;
  2141. p->se.sum_sleep_runtime = 0;
  2142. p->se.block_start = 0;
  2143. p->se.block_max = 0;
  2144. p->se.exec_max = 0;
  2145. p->se.slice_max = 0;
  2146. p->se.nr_migrations_cold = 0;
  2147. p->se.nr_failed_migrations_affine = 0;
  2148. p->se.nr_failed_migrations_running = 0;
  2149. p->se.nr_failed_migrations_hot = 0;
  2150. p->se.nr_forced_migrations = 0;
  2151. p->se.nr_wakeups = 0;
  2152. p->se.nr_wakeups_sync = 0;
  2153. p->se.nr_wakeups_migrate = 0;
  2154. p->se.nr_wakeups_local = 0;
  2155. p->se.nr_wakeups_remote = 0;
  2156. p->se.nr_wakeups_affine = 0;
  2157. p->se.nr_wakeups_affine_attempts = 0;
  2158. p->se.nr_wakeups_passive = 0;
  2159. p->se.nr_wakeups_idle = 0;
  2160. #endif
  2161. INIT_LIST_HEAD(&p->rt.run_list);
  2162. p->se.on_rq = 0;
  2163. INIT_LIST_HEAD(&p->se.group_node);
  2164. #ifdef CONFIG_PREEMPT_NOTIFIERS
  2165. INIT_HLIST_HEAD(&p->preempt_notifiers);
  2166. #endif
  2167. }
  2168. /*
  2169. * fork()/clone()-time setup:
  2170. */
  2171. void sched_fork(struct task_struct *p, int clone_flags)
  2172. {
  2173. int cpu = get_cpu();
  2174. __sched_fork(p);
  2175. /*
  2176. * We mark the process as waking here. This guarantees that
  2177. * nobody will actually run it, and a signal or other external
  2178. * event cannot wake it up and insert it on the runqueue either.
  2179. */
  2180. p->state = TASK_WAKING;
  2181. /*
  2182. * Revert to default priority/policy on fork if requested.
  2183. */
  2184. if (unlikely(p->sched_reset_on_fork)) {
  2185. if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
  2186. p->policy = SCHED_NORMAL;
  2187. p->normal_prio = p->static_prio;
  2188. }
  2189. if (PRIO_TO_NICE(p->static_prio) < 0) {
  2190. p->static_prio = NICE_TO_PRIO(0);
  2191. p->normal_prio = p->static_prio;
  2192. set_load_weight(p);
  2193. }
  2194. /*
  2195. * We don't need the reset flag anymore after the fork. It has
  2196. * fulfilled its duty:
  2197. */
  2198. p->sched_reset_on_fork = 0;
  2199. }
  2200. /*
  2201. * Make sure we do not leak PI boosting priority to the child.
  2202. */
  2203. p->prio = current->normal_prio;
  2204. if (!rt_prio(p->prio))
  2205. p->sched_class = &fair_sched_class;
  2206. if (p->sched_class->task_fork)
  2207. p->sched_class->task_fork(p);
  2208. set_task_cpu(p, cpu);
  2209. #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
  2210. if (likely(sched_info_on()))
  2211. memset(&p->sched_info, 0, sizeof(p->sched_info));
  2212. #endif
  2213. #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
  2214. p->oncpu = 0;
  2215. #endif
  2216. #ifdef CONFIG_PREEMPT
  2217. /* Want to start with kernel preemption disabled. */
  2218. task_thread_info(p)->preempt_count = 1;
  2219. #endif
  2220. plist_node_init(&p->pushable_tasks, MAX_PRIO);
  2221. put_cpu();
  2222. }
  2223. /*
  2224. * wake_up_new_task - wake up a newly created task for the first time.
  2225. *
  2226. * This function will do some initial scheduler statistics housekeeping
  2227. * that must be done for every newly created context, then puts the task
  2228. * on the runqueue and wakes it.
  2229. */
  2230. void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
  2231. {
  2232. unsigned long flags;
  2233. struct rq *rq;
  2234. int cpu = get_cpu();
  2235. #ifdef CONFIG_SMP
  2236. /*
  2237. * Fork balancing, do it here and not earlier because:
  2238. * - cpus_allowed can change in the fork path
  2239. * - any previously selected cpu might disappear through hotplug
  2240. *
  2241. * We still have TASK_WAKING but PF_STARTING is gone now, meaning
  2242. * ->cpus_allowed is stable, we have preemption disabled, meaning
  2243. * cpu_online_mask is stable.
  2244. */
  2245. cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
  2246. set_task_cpu(p, cpu);
  2247. #endif
  2248. /*
  2249. * Since the task is not on the rq and we still have TASK_WAKING set
  2250. * nobody else will migrate this task.
  2251. */
  2252. rq = cpu_rq(cpu);
  2253. raw_spin_lock_irqsave(&rq->lock, flags);
  2254. BUG_ON(p->state != TASK_WAKING);
  2255. p->state = TASK_RUNNING;
  2256. update_rq_clock(rq);
  2257. activate_task(rq, p, 0);
  2258. trace_sched_wakeup_new(rq, p, 1);
  2259. check_preempt_curr(rq, p, WF_FORK);
  2260. #ifdef CONFIG_SMP
  2261. if (p->sched_class->task_woken)
  2262. p->sched_class->task_woken(rq, p);
  2263. #endif
  2264. task_rq_unlock(rq, &flags);
  2265. put_cpu();
  2266. }
  2267. #ifdef CONFIG_PREEMPT_NOTIFIERS
  2268. /**
  2269. * preempt_notifier_register - tell me when current is being preempted & rescheduled
  2270. * @notifier: notifier struct to register
  2271. */
  2272. void preempt_notifier_register(struct preempt_notifier *notifier)
  2273. {
  2274. hlist_add_head(&notifier->link, &current->preempt_notifiers);
  2275. }
  2276. EXPORT_SYMBOL_GPL(preempt_notifier_register);
  2277. /**
  2278. * preempt_notifier_unregister - no longer interested in preemption notifications
  2279. * @notifier: notifier struct to unregister
  2280. *
  2281. * This is safe to call from within a preemption notifier.
  2282. */
  2283. void preempt_notifier_unregister(struct preempt_notifier *notifier)
  2284. {
  2285. hlist_del(&notifier->link);
  2286. }
  2287. EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
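/*
 * Illustrative sketch (not from this file): a hypothetical user of the
 * preempt notifier API.  The callback signatures follow struct preempt_ops
 * as declared in <linux/preempt.h>; the my_* and ctx names are made up.
 *
 *	static void my_sched_in(struct preempt_notifier *pn, int cpu)
 *	{
 *		... running again on 'cpu': reload per-task hw state ...
 *	}
 *
 *	static void my_sched_out(struct preempt_notifier *pn,
 *				 struct task_struct *next)
 *	{
 *		... about to be preempted in favour of 'next': save state ...
 *	}
 *
 *	static struct preempt_ops my_preempt_ops = {
 *		.sched_in	= my_sched_in,
 *		.sched_out	= my_sched_out,
 *	};
 *
 *	preempt_notifier_init(&ctx->notifier, &my_preempt_ops);
 *	preempt_notifier_register(&ctx->notifier);
 */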
  2288. static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
  2289. {
  2290. struct preempt_notifier *notifier;
  2291. struct hlist_node *node;
  2292. hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
  2293. notifier->ops->sched_in(notifier, raw_smp_processor_id());
  2294. }
  2295. static void
  2296. fire_sched_out_preempt_notifiers(struct task_struct *curr,
  2297. struct task_struct *next)
  2298. {
  2299. struct preempt_notifier *notifier;
  2300. struct hlist_node *node;
  2301. hlist_for_each_entry(notifier, node, &curr->preempt_notifiers, link)
  2302. notifier->ops->sched_out(notifier, next);
  2303. }
  2304. #else /* !CONFIG_PREEMPT_NOTIFIERS */
  2305. static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
  2306. {
  2307. }
  2308. static void
  2309. fire_sched_out_preempt_notifiers(struct task_struct *curr,
  2310. struct task_struct *next)
  2311. {
  2312. }
  2313. #endif /* CONFIG_PREEMPT_NOTIFIERS */
  2314. /**
  2315. * prepare_task_switch - prepare to switch tasks
  2316. * @rq: the runqueue preparing to switch
  2317. * @prev: the current task that is being switched out
  2318. * @next: the task we are going to switch to.
  2319. *
  2320. * This is called with the rq lock held and interrupts off. It must
  2321. * be paired with a subsequent finish_task_switch after the context
  2322. * switch.
  2323. *
  2324. * prepare_task_switch sets up locking and calls architecture specific
  2325. * hooks.
  2326. */
  2327. static inline void
  2328. prepare_task_switch(struct rq *rq, struct task_struct *prev,
  2329. struct task_struct *next)
  2330. {
  2331. fire_sched_out_preempt_notifiers(prev, next);
  2332. prepare_lock_switch(rq, next);
  2333. prepare_arch_switch(next);
  2334. }
  2335. /**
  2336. * finish_task_switch - clean up after a task-switch
  2337. * @rq: runqueue associated with task-switch
  2338. * @prev: the thread we just switched away from.
  2339. *
  2340. * finish_task_switch must be called after the context switch, paired
  2341. * with a prepare_task_switch call before the context switch.
  2342. * finish_task_switch will reconcile locking set up by prepare_task_switch,
  2343. * and do any other architecture-specific cleanup actions.
  2344. *
  2345. * Note that we may have delayed dropping an mm in context_switch(). If
  2346. * so, we finish that here outside of the runqueue lock. (Doing it
  2347. * with the lock held can cause deadlocks; see schedule() for
  2348. * details.)
  2349. */
  2350. static void finish_task_switch(struct rq *rq, struct task_struct *prev)
  2351. __releases(rq->lock)
  2352. {
  2353. struct mm_struct *mm = rq->prev_mm;
  2354. long prev_state;
  2355. rq->prev_mm = NULL;
  2356. /*
  2357. * A task struct has one reference for the use as "current".
  2358. * If a task dies, then it sets TASK_DEAD in tsk->state and calls
  2359. * schedule one last time. The schedule call will never return, and
  2360. * the scheduled task must drop that reference.
  2361. * The test for TASK_DEAD must occur while the runqueue locks are
  2362. * still held, otherwise prev could be scheduled on another cpu, die
  2363. * there before we look at prev->state, and then the reference would
  2364. * be dropped twice.
  2365. * Manfred Spraul <manfred@colorfullife.com>
  2366. */
  2367. prev_state = prev->state;
  2368. finish_arch_switch(prev);
  2369. perf_event_task_sched_in(current, cpu_of(rq));
  2370. finish_lock_switch(rq, prev);
  2371. fire_sched_in_preempt_notifiers(current);
  2372. if (mm)
  2373. mmdrop(mm);
  2374. if (unlikely(prev_state == TASK_DEAD)) {
  2375. /*
  2376. * Remove function-return probe instances associated with this
  2377. * task and put them back on the free list.
  2378. */
  2379. kprobe_flush_task(prev);
  2380. put_task_struct(prev);
  2381. }
  2382. }
  2383. #ifdef CONFIG_SMP
  2384. /* assumes rq->lock is held */
  2385. static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
  2386. {
  2387. if (prev->sched_class->pre_schedule)
  2388. prev->sched_class->pre_schedule(rq, prev);
  2389. }
  2390. /* rq->lock is NOT held, but preemption is disabled */
  2391. static inline void post_schedule(struct rq *rq)
  2392. {
  2393. if (rq->post_schedule) {
  2394. unsigned long flags;
  2395. raw_spin_lock_irqsave(&rq->lock, flags);
  2396. if (rq->curr->sched_class->post_schedule)
  2397. rq->curr->sched_class->post_schedule(rq);
  2398. raw_spin_unlock_irqrestore(&rq->lock, flags);
  2399. rq->post_schedule = 0;
  2400. }
  2401. }
  2402. #else
  2403. static inline void pre_schedule(struct rq *rq, struct task_struct *p)
  2404. {
  2405. }
  2406. static inline void post_schedule(struct rq *rq)
  2407. {
  2408. }
  2409. #endif
  2410. /**
  2411. * schedule_tail - first thing a freshly forked thread must call.
  2412. * @prev: the thread we just switched away from.
  2413. */
  2414. asmlinkage void schedule_tail(struct task_struct *prev)
  2415. __releases(rq->lock)
  2416. {
  2417. struct rq *rq = this_rq();
  2418. finish_task_switch(rq, prev);
  2419. /*
  2420. * FIXME: do we need to worry about rq being invalidated by the
  2421. * task_switch?
  2422. */
  2423. post_schedule(rq);
  2424. #ifdef __ARCH_WANT_UNLOCKED_CTXSW
  2425. /* In this case, finish_task_switch does not reenable preemption */
  2426. preempt_enable();
  2427. #endif
  2428. if (current->set_child_tid)
  2429. put_user(task_pid_vnr(current), current->set_child_tid);
  2430. }
  2431. /*
  2432. * context_switch - switch to the new MM and the new
  2433. * thread's register state.
  2434. */
  2435. static inline void
  2436. context_switch(struct rq *rq, struct task_struct *prev,
  2437. struct task_struct *next)
  2438. {
  2439. struct mm_struct *mm, *oldmm;
  2440. prepare_task_switch(rq, prev, next);
  2441. trace_sched_switch(rq, prev, next);
  2442. mm = next->mm;
  2443. oldmm = prev->active_mm;
  2444. /*
  2445. * For paravirt, this is coupled with an exit in switch_to to
  2446. * combine the page table reload and the switch backend into
  2447. * one hypercall.
  2448. */
  2449. arch_start_context_switch(prev);
  2450. if (likely(!mm)) {
  2451. next->active_mm = oldmm;
  2452. atomic_inc(&oldmm->mm_count);
  2453. enter_lazy_tlb(oldmm, next);
  2454. } else
  2455. switch_mm(oldmm, mm, next);
  2456. if (likely(!prev->mm)) {
  2457. prev->active_mm = NULL;
  2458. rq->prev_mm = oldmm;
  2459. }
  2460. /*
2461. * Since the runqueue lock will be released by the next
2462. * task (which is an invalid locking op, but in the case
2463. * of the scheduler it's an obvious special-case), we
2464. * do an early lockdep release here:
  2465. */
  2466. #ifndef __ARCH_WANT_UNLOCKED_CTXSW
  2467. spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  2468. #endif
  2469. /* Here we just switch the register state and the stack. */
  2470. switch_to(prev, next, prev);
  2471. barrier();
  2472. /*
  2473. * this_rq must be evaluated again because prev may have moved
  2474. * CPUs since it called schedule(), thus the 'rq' on its stack
  2475. * frame will be invalid.
  2476. */
  2477. finish_task_switch(this_rq(), prev);
  2478. }
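/*
 * Worked example of the mm/active_mm handling above (sketch, not part of
 * the original source): kernel threads have next->mm == NULL, so they
 * borrow the previous task's active_mm and take an mm_count reference
 * instead of switching page tables (enter_lazy_tlb()).  When we later
 * switch away from such a kernel thread, prev->mm is NULL, so the
 * borrowed active_mm is parked in rq->prev_mm and dropped via mmdrop()
 * in finish_task_switch(), outside the runqueue lock.
 */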
  2479. /*
  2480. * nr_running, nr_uninterruptible and nr_context_switches:
  2481. *
  2482. * externally visible scheduler statistics: current number of runnable
  2483. * threads, current number of uninterruptible-sleeping threads, total
  2484. * number of context switches performed since bootup.
  2485. */
  2486. unsigned long nr_running(void)
  2487. {
  2488. unsigned long i, sum = 0;
  2489. for_each_online_cpu(i)
  2490. sum += cpu_rq(i)->nr_running;
  2491. return sum;
  2492. }
  2493. unsigned long nr_uninterruptible(void)
  2494. {
  2495. unsigned long i, sum = 0;
  2496. for_each_possible_cpu(i)
  2497. sum += cpu_rq(i)->nr_uninterruptible;
  2498. /*
  2499. * Since we read the counters lockless, it might be slightly
  2500. * inaccurate. Do not allow it to go below zero though:
  2501. */
  2502. if (unlikely((long)sum < 0))
  2503. sum = 0;
  2504. return sum;
  2505. }
  2506. unsigned long long nr_context_switches(void)
  2507. {
  2508. int i;
  2509. unsigned long long sum = 0;
  2510. for_each_possible_cpu(i)
  2511. sum += cpu_rq(i)->nr_switches;
  2512. return sum;
  2513. }
  2514. unsigned long nr_iowait(void)
  2515. {
  2516. unsigned long i, sum = 0;
  2517. for_each_possible_cpu(i)
  2518. sum += atomic_read(&cpu_rq(i)->nr_iowait);
  2519. return sum;
  2520. }
  2521. unsigned long nr_iowait_cpu(void)
  2522. {
  2523. struct rq *this = this_rq();
  2524. return atomic_read(&this->nr_iowait);
  2525. }
  2526. unsigned long this_cpu_load(void)
  2527. {
  2528. struct rq *this = this_rq();
  2529. return this->cpu_load[0];
  2530. }
  2531. /* Variables and functions for calc_load */
  2532. static atomic_long_t calc_load_tasks;
  2533. static unsigned long calc_load_update;
  2534. unsigned long avenrun[3];
  2535. EXPORT_SYMBOL(avenrun);
  2536. /**
  2537. * get_avenrun - get the load average array
  2538. * @loads: pointer to dest load array
  2539. * @offset: offset to add
  2540. * @shift: shift count to shift the result left
  2541. *
  2542. * These values are estimates at best, so no need for locking.
  2543. */
  2544. void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
  2545. {
  2546. loads[0] = (avenrun[0] + offset) << shift;
  2547. loads[1] = (avenrun[1] + offset) << shift;
  2548. loads[2] = (avenrun[2] + offset) << shift;
  2549. }
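/*
 * Illustrative sketch (not part of this file): this is how a
 * /proc/loadavg style reader turns the fixed-point avenrun values into
 * the familiar "0.42" form, using the LOAD_INT()/LOAD_FRAC() helpers
 * from <linux/sched.h>:
 *
 *	unsigned long loads[3];
 *
 *	get_avenrun(loads, FIXED_1/200, 0);
 *	printk("%lu.%02lu %lu.%02lu %lu.%02lu\n",
 *	       LOAD_INT(loads[0]), LOAD_FRAC(loads[0]),
 *	       LOAD_INT(loads[1]), LOAD_FRAC(loads[1]),
 *	       LOAD_INT(loads[2]), LOAD_FRAC(loads[2]));
 */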
  2550. static unsigned long
  2551. calc_load(unsigned long load, unsigned long exp, unsigned long active)
  2552. {
  2553. load *= exp;
  2554. load += active * (FIXED_1 - exp);
  2555. return load >> FSHIFT;
  2556. }
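/*
 * Worked example (sketch): with FSHIFT = 11 the fixed-point unit is
 * FIXED_1 = 2048, and EXP_1 = 1884 approximates 2048/e^(5s/1min).  One
 * update of the 1-minute average starting from load = 0 with
 * active = 3 tasks (3 * 2048 = 6144) gives:
 *
 *	load = (0 * 1884 + 6144 * (2048 - 1884)) >> 11
 *	     = (6144 * 164) >> 11
 *	     = 1007616 >> 11 = 492,  i.e. about 0.24 in /proc/loadavg
 *
 * and the average climbs toward 3.00 exponentially as further 5-second
 * samples come in.
 */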
  2557. /*
2558. * calc_global_load - update the avenrun load estimates 10 ticks after the
  2559. * CPUs have updated calc_load_tasks.
  2560. */
  2561. void calc_global_load(void)
  2562. {
  2563. unsigned long upd = calc_load_update + 10;
  2564. long active;
  2565. if (time_before(jiffies, upd))
  2566. return;
  2567. active = atomic_long_read(&calc_load_tasks);
  2568. active = active > 0 ? active * FIXED_1 : 0;
  2569. avenrun[0] = calc_load(avenrun[0], EXP_1, active);
  2570. avenrun[1] = calc_load(avenrun[1], EXP_5, active);
  2571. avenrun[2] = calc_load(avenrun[2], EXP_15, active);
  2572. calc_load_update += LOAD_FREQ;
  2573. }
  2574. /*
  2575. * Either called from update_cpu_load() or from a cpu going idle
  2576. */
  2577. static void calc_load_account_active(struct rq *this_rq)
  2578. {
  2579. long nr_active, delta;
  2580. nr_active = this_rq->nr_running;
  2581. nr_active += (long) this_rq->nr_uninterruptible;
  2582. if (nr_active != this_rq->calc_load_active) {
  2583. delta = nr_active - this_rq->calc_load_active;
  2584. this_rq->calc_load_active = nr_active;
  2585. atomic_long_add(delta, &calc_load_tasks);
  2586. }
  2587. }
  2588. /*
  2589. * Update rq->cpu_load[] statistics. This function is usually called every
  2590. * scheduler tick (TICK_NSEC).
  2591. */
  2592. static void update_cpu_load(struct rq *this_rq)
  2593. {
  2594. unsigned long this_load = this_rq->load.weight;
  2595. int i, scale;
  2596. this_rq->nr_load_updates++;
  2597. /* Update our load: */
  2598. for (i = 0, scale = 1; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
  2599. unsigned long old_load, new_load;
  2600. /* scale is effectively 1 << i now, and >> i divides by scale */
  2601. old_load = this_rq->cpu_load[i];
  2602. new_load = this_load;
  2603. /*
  2604. * Round up the averaging division if load is increasing. This
  2605. * prevents us from getting stuck on 9 if the load is 10, for
  2606. * example.
  2607. */
  2608. if (new_load > old_load)
  2609. new_load += scale-1;
  2610. this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
  2611. }
  2612. if (time_after_eq(jiffies, this_rq->calc_load_update)) {
  2613. this_rq->calc_load_update += LOAD_FREQ;
  2614. calc_load_account_active(this_rq);
  2615. }
  2616. }
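/*
 * Worked example (sketch): for index i the update above is essentially
 *
 *	cpu_load[i] = (cpu_load[i] * (2^i - 1) + this_load) / 2^i
 *
 * plus a small round-up when the load is increasing.  cpu_load[0] tracks
 * the instantaneous runqueue weight, larger indices decay more slowly:
 * with i = 2, old_load = 0 and a steady this_load of 1024 the sequence
 * is 256, 448, 592, ... converging on 1024 tick by tick.
 */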
  2617. #ifdef CONFIG_SMP
  2618. /*
  2619. * sched_exec - execve() is a valuable balancing opportunity, because at
  2620. * this point the task has the smallest effective memory and cache footprint.
  2621. */
  2622. void sched_exec(void)
  2623. {
  2624. struct task_struct *p = current;
  2625. struct migration_req req;
  2626. int dest_cpu, this_cpu;
  2627. unsigned long flags;
  2628. struct rq *rq;
  2629. again:
  2630. this_cpu = get_cpu();
  2631. dest_cpu = select_task_rq(p, SD_BALANCE_EXEC, 0);
  2632. if (dest_cpu == this_cpu) {
  2633. put_cpu();
  2634. return;
  2635. }
  2636. rq = task_rq_lock(p, &flags);
  2637. put_cpu();
  2638. /*
  2639. * select_task_rq() can race against ->cpus_allowed
  2640. */
  2641. if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)
  2642. || unlikely(!cpu_active(dest_cpu))) {
  2643. task_rq_unlock(rq, &flags);
  2644. goto again;
  2645. }
  2646. /* force the process onto the specified CPU */
  2647. if (migrate_task(p, dest_cpu, &req)) {
  2648. /* Need to wait for migration thread (might exit: take ref). */
  2649. struct task_struct *mt = rq->migration_thread;
  2650. get_task_struct(mt);
  2651. task_rq_unlock(rq, &flags);
  2652. wake_up_process(mt);
  2653. put_task_struct(mt);
  2654. wait_for_completion(&req.done);
  2655. return;
  2656. }
  2657. task_rq_unlock(rq, &flags);
  2658. }
  2659. #endif
  2660. DEFINE_PER_CPU(struct kernel_stat, kstat);
  2661. EXPORT_PER_CPU_SYMBOL(kstat);
  2662. /*
  2663. * Return any ns on the sched_clock that have not yet been accounted in
  2664. * @p in case that task is currently running.
  2665. *
  2666. * Called with task_rq_lock() held on @rq.
  2667. */
  2668. static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
  2669. {
  2670. u64 ns = 0;
  2671. if (task_current(rq, p)) {
  2672. update_rq_clock(rq);
  2673. ns = rq->clock - p->se.exec_start;
  2674. if ((s64)ns < 0)
  2675. ns = 0;
  2676. }
  2677. return ns;
  2678. }
  2679. unsigned long long task_delta_exec(struct task_struct *p)
  2680. {
  2681. unsigned long flags;
  2682. struct rq *rq;
  2683. u64 ns = 0;
  2684. rq = task_rq_lock(p, &flags);
  2685. ns = do_task_delta_exec(p, rq);
  2686. task_rq_unlock(rq, &flags);
  2687. return ns;
  2688. }
  2689. /*
  2690. * Return accounted runtime for the task.
  2691. * In case the task is currently running, return the runtime plus current's
2692. * pending runtime that has not been accounted yet.
  2693. */
  2694. unsigned long long task_sched_runtime(struct task_struct *p)
  2695. {
  2696. unsigned long flags;
  2697. struct rq *rq;
  2698. u64 ns = 0;
  2699. rq = task_rq_lock(p, &flags);
  2700. ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
  2701. task_rq_unlock(rq, &flags);
  2702. return ns;
  2703. }
  2704. /*
  2705. * Return sum_exec_runtime for the thread group.
  2706. * In case the task is currently running, return the sum plus current's
2707. * pending runtime that has not been accounted yet.
  2708. *
  2709. * Note that the thread group might have other running tasks as well,
2710. * so the return value does not include pending runtime that other
2711. * running tasks might have.
  2712. */
  2713. unsigned long long thread_group_sched_runtime(struct task_struct *p)
  2714. {
  2715. struct task_cputime totals;
  2716. unsigned long flags;
  2717. struct rq *rq;
  2718. u64 ns;
  2719. rq = task_rq_lock(p, &flags);
  2720. thread_group_cputime(p, &totals);
  2721. ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
  2722. task_rq_unlock(rq, &flags);
  2723. return ns;
  2724. }
  2725. /*
  2726. * Account user cpu time to a process.
  2727. * @p: the process that the cpu time gets accounted to
  2728. * @cputime: the cpu time spent in user space since the last update
  2729. * @cputime_scaled: cputime scaled by cpu frequency
  2730. */
  2731. void account_user_time(struct task_struct *p, cputime_t cputime,
  2732. cputime_t cputime_scaled)
  2733. {
  2734. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  2735. cputime64_t tmp;
  2736. /* Add user time to process. */
  2737. p->utime = cputime_add(p->utime, cputime);
  2738. p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
  2739. account_group_user_time(p, cputime);
  2740. /* Add user time to cpustat. */
  2741. tmp = cputime_to_cputime64(cputime);
  2742. if (TASK_NICE(p) > 0)
  2743. cpustat->nice = cputime64_add(cpustat->nice, tmp);
  2744. else
  2745. cpustat->user = cputime64_add(cpustat->user, tmp);
  2746. cpuacct_update_stats(p, CPUACCT_STAT_USER, cputime);
  2747. /* Account for user time used */
  2748. acct_update_integrals(p);
  2749. }
  2750. /*
  2751. * Account guest cpu time to a process.
  2752. * @p: the process that the cpu time gets accounted to
  2753. * @cputime: the cpu time spent in virtual machine since the last update
  2754. * @cputime_scaled: cputime scaled by cpu frequency
  2755. */
  2756. static void account_guest_time(struct task_struct *p, cputime_t cputime,
  2757. cputime_t cputime_scaled)
  2758. {
  2759. cputime64_t tmp;
  2760. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  2761. tmp = cputime_to_cputime64(cputime);
  2762. /* Add guest time to process. */
  2763. p->utime = cputime_add(p->utime, cputime);
  2764. p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
  2765. account_group_user_time(p, cputime);
  2766. p->gtime = cputime_add(p->gtime, cputime);
  2767. /* Add guest time to cpustat. */
  2768. if (TASK_NICE(p) > 0) {
  2769. cpustat->nice = cputime64_add(cpustat->nice, tmp);
  2770. cpustat->guest_nice = cputime64_add(cpustat->guest_nice, tmp);
  2771. } else {
  2772. cpustat->user = cputime64_add(cpustat->user, tmp);
  2773. cpustat->guest = cputime64_add(cpustat->guest, tmp);
  2774. }
  2775. }
  2776. /*
  2777. * Account system cpu time to a process.
  2778. * @p: the process that the cpu time gets accounted to
  2779. * @hardirq_offset: the offset to subtract from hardirq_count()
  2780. * @cputime: the cpu time spent in kernel space since the last update
  2781. * @cputime_scaled: cputime scaled by cpu frequency
  2782. */
  2783. void account_system_time(struct task_struct *p, int hardirq_offset,
  2784. cputime_t cputime, cputime_t cputime_scaled)
  2785. {
  2786. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  2787. cputime64_t tmp;
  2788. if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
  2789. account_guest_time(p, cputime, cputime_scaled);
  2790. return;
  2791. }
  2792. /* Add system time to process. */
  2793. p->stime = cputime_add(p->stime, cputime);
  2794. p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
  2795. account_group_system_time(p, cputime);
  2796. /* Add system time to cpustat. */
  2797. tmp = cputime_to_cputime64(cputime);
  2798. if (hardirq_count() - hardirq_offset)
  2799. cpustat->irq = cputime64_add(cpustat->irq, tmp);
  2800. else if (softirq_count())
  2801. cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
  2802. else
  2803. cpustat->system = cputime64_add(cpustat->system, tmp);
  2804. cpuacct_update_stats(p, CPUACCT_STAT_SYSTEM, cputime);
  2805. /* Account for system time used */
  2806. acct_update_integrals(p);
  2807. }
  2808. /*
  2809. * Account for involuntary wait time.
2810. * @cputime: the cpu time spent in involuntary wait
  2811. */
  2812. void account_steal_time(cputime_t cputime)
  2813. {
  2814. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  2815. cputime64_t cputime64 = cputime_to_cputime64(cputime);
  2816. cpustat->steal = cputime64_add(cpustat->steal, cputime64);
  2817. }
  2818. /*
  2819. * Account for idle time.
  2820. * @cputime: the cpu time spent in idle wait
  2821. */
  2822. void account_idle_time(cputime_t cputime)
  2823. {
  2824. struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
  2825. cputime64_t cputime64 = cputime_to_cputime64(cputime);
  2826. struct rq *rq = this_rq();
  2827. if (atomic_read(&rq->nr_iowait) > 0)
  2828. cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
  2829. else
  2830. cpustat->idle = cputime64_add(cpustat->idle, cputime64);
  2831. }
  2832. #ifndef CONFIG_VIRT_CPU_ACCOUNTING
  2833. /*
  2834. * Account a single tick of cpu time.
  2835. * @p: the process that the cpu time gets accounted to
  2836. * @user_tick: indicates if the tick is a user or a system tick
  2837. */
  2838. void account_process_tick(struct task_struct *p, int user_tick)
  2839. {
  2840. cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);
  2841. struct rq *rq = this_rq();
  2842. if (user_tick)
  2843. account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
  2844. else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
  2845. account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
  2846. one_jiffy_scaled);
  2847. else
  2848. account_idle_time(cputime_one_jiffy);
  2849. }
  2850. /*
  2851. * Account multiple ticks of steal time.
  2853. * @ticks: number of stolen ticks
  2854. */
  2855. void account_steal_ticks(unsigned long ticks)
  2856. {
  2857. account_steal_time(jiffies_to_cputime(ticks));
  2858. }
  2859. /*
  2860. * Account multiple ticks of idle time.
2861. * @ticks: number of idle ticks
  2862. */
  2863. void account_idle_ticks(unsigned long ticks)
  2864. {
  2865. account_idle_time(jiffies_to_cputime(ticks));
  2866. }
  2867. #endif
  2868. /*
  2869. * Use precise platform statistics if available:
  2870. */
  2871. #ifdef CONFIG_VIRT_CPU_ACCOUNTING
  2872. void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
  2873. {
  2874. *ut = p->utime;
  2875. *st = p->stime;
  2876. }
  2877. void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
  2878. {
  2879. struct task_cputime cputime;
  2880. thread_group_cputime(p, &cputime);
  2881. *ut = cputime.utime;
  2882. *st = cputime.stime;
  2883. }
  2884. #else
  2885. #ifndef nsecs_to_cputime
  2886. # define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs)
  2887. #endif
  2888. void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
  2889. {
  2890. cputime_t rtime, utime = p->utime, total = cputime_add(utime, p->stime);
  2891. /*
  2892. * Use CFS's precise accounting:
  2893. */
  2894. rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
  2895. if (total) {
  2896. u64 temp;
  2897. temp = (u64)(rtime * utime);
  2898. do_div(temp, total);
  2899. utime = (cputime_t)temp;
  2900. } else
  2901. utime = rtime;
  2902. /*
  2903. * Compare with previous values, to keep monotonicity:
  2904. */
  2905. p->prev_utime = max(p->prev_utime, utime);
  2906. p->prev_stime = max(p->prev_stime, cputime_sub(rtime, p->prev_utime));
  2907. *ut = p->prev_utime;
  2908. *st = p->prev_stime;
  2909. }
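/*
 * Worked example (sketch): suppose the tick-based sums are utime = 6 and
 * stime = 4 jiffies (total = 10), while CFS's sum_exec_runtime converts
 * to rtime = 20 jiffies.  The scaling above yields
 *
 *	utime = 20 * 6 / 10 = 12
 *	stime = rtime - prev_utime = 20 - 12 = 8
 *
 * i.e. the 6:4 user/system ratio is preserved but rescaled to the more
 * precise total, and the prev_* fields keep both values monotonic.
 */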
  2910. /*
  2911. * Must be called with siglock held.
  2912. */
  2913. void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
  2914. {
  2915. struct signal_struct *sig = p->signal;
  2916. struct task_cputime cputime;
  2917. cputime_t rtime, utime, total;
  2918. thread_group_cputime(p, &cputime);
  2919. total = cputime_add(cputime.utime, cputime.stime);
  2920. rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
  2921. if (total) {
  2922. u64 temp;
  2923. temp = (u64)(rtime * cputime.utime);
  2924. do_div(temp, total);
  2925. utime = (cputime_t)temp;
  2926. } else
  2927. utime = rtime;
  2928. sig->prev_utime = max(sig->prev_utime, utime);
  2929. sig->prev_stime = max(sig->prev_stime,
  2930. cputime_sub(rtime, sig->prev_utime));
  2931. *ut = sig->prev_utime;
  2932. *st = sig->prev_stime;
  2933. }
  2934. #endif
  2935. /*
  2936. * This function gets called by the timer code, with HZ frequency.
  2937. * We call it with interrupts disabled.
  2938. *
  2939. * It also gets called by the fork code, when changing the parent's
  2940. * timeslices.
  2941. */
  2942. void scheduler_tick(void)
  2943. {
  2944. int cpu = smp_processor_id();
  2945. struct rq *rq = cpu_rq(cpu);
  2946. struct task_struct *curr = rq->curr;
  2947. sched_clock_tick();
  2948. raw_spin_lock(&rq->lock);
  2949. update_rq_clock(rq);
  2950. update_cpu_load(rq);
  2951. curr->sched_class->task_tick(rq, curr, 0);
  2952. raw_spin_unlock(&rq->lock);
  2953. perf_event_task_tick(curr, cpu);
  2954. #ifdef CONFIG_SMP
  2955. rq->idle_at_tick = idle_cpu(cpu);
  2956. trigger_load_balance(rq, cpu);
  2957. #endif
  2958. }
  2959. notrace unsigned long get_parent_ip(unsigned long addr)
  2960. {
  2961. if (in_lock_functions(addr)) {
  2962. addr = CALLER_ADDR2;
  2963. if (in_lock_functions(addr))
  2964. addr = CALLER_ADDR3;
  2965. }
  2966. return addr;
  2967. }
  2968. #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
  2969. defined(CONFIG_PREEMPT_TRACER))
  2970. void __kprobes add_preempt_count(int val)
  2971. {
  2972. #ifdef CONFIG_DEBUG_PREEMPT
  2973. /*
  2974. * Underflow?
  2975. */
  2976. if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
  2977. return;
  2978. #endif
  2979. preempt_count() += val;
  2980. #ifdef CONFIG_DEBUG_PREEMPT
  2981. /*
  2982. * Spinlock count overflowing soon?
  2983. */
  2984. DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >=
  2985. PREEMPT_MASK - 10);
  2986. #endif
  2987. if (preempt_count() == val)
  2988. trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
  2989. }
  2990. EXPORT_SYMBOL(add_preempt_count);
  2991. void __kprobes sub_preempt_count(int val)
  2992. {
  2993. #ifdef CONFIG_DEBUG_PREEMPT
  2994. /*
  2995. * Underflow?
  2996. */
  2997. if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
  2998. return;
  2999. /*
  3000. * Is the spinlock portion underflowing?
  3001. */
  3002. if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
  3003. !(preempt_count() & PREEMPT_MASK)))
  3004. return;
  3005. #endif
  3006. if (preempt_count() == val)
  3007. trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
  3008. preempt_count() -= val;
  3009. }
  3010. EXPORT_SYMBOL(sub_preempt_count);
  3011. #endif
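/*
 * Sketch of what the checks above guard (assumption: bit layout as in
 * <linux/hardirq.h>): the low byte of preempt_count() (PREEMPT_MASK ==
 * 0x000000ff) counts nested preempt_disable()/spin_lock() sections, e.g.
 *
 *	preempt_disable();	count becomes 0x01
 *	spin_lock(&a);		count becomes 0x02
 *	spin_unlock(&a);	count becomes 0x01
 *	preempt_enable();	count becomes 0x00, preemption possible again
 *
 * and the "overflowing soon" warning fires once that byte nears 0xff.
 */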
  3012. /*
  3013. * Print scheduling while atomic bug:
  3014. */
  3015. static noinline void __schedule_bug(struct task_struct *prev)
  3016. {
  3017. struct pt_regs *regs = get_irq_regs();
  3018. printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n",
  3019. prev->comm, prev->pid, preempt_count());
  3020. debug_show_held_locks(prev);
  3021. print_modules();
  3022. if (irqs_disabled())
  3023. print_irqtrace_events(prev);
  3024. if (regs)
  3025. show_regs(regs);
  3026. else
  3027. dump_stack();
  3028. }
  3029. /*
  3030. * Various schedule()-time debugging checks and statistics:
  3031. */
  3032. static inline void schedule_debug(struct task_struct *prev)
  3033. {
  3034. /*
  3035. * Test if we are atomic. Since do_exit() needs to call into
  3036. * schedule() atomically, we ignore that path for now.
  3037. * Otherwise, whine if we are scheduling when we should not be.
  3038. */
  3039. if (unlikely(in_atomic_preempt_off() && !prev->exit_state))
  3040. __schedule_bug(prev);
  3041. profile_hit(SCHED_PROFILING, __builtin_return_address(0));
  3042. schedstat_inc(this_rq(), sched_count);
  3043. #ifdef CONFIG_SCHEDSTATS
  3044. if (unlikely(prev->lock_depth >= 0)) {
  3045. schedstat_inc(this_rq(), bkl_count);
  3046. schedstat_inc(prev, sched_info.bkl_count);
  3047. }
  3048. #endif
  3049. }
  3050. static void put_prev_task(struct rq *rq, struct task_struct *prev)
  3051. {
  3052. if (prev->state == TASK_RUNNING) {
  3053. u64 runtime = prev->se.sum_exec_runtime;
  3054. runtime -= prev->se.prev_sum_exec_runtime;
  3055. runtime = min_t(u64, runtime, 2*sysctl_sched_migration_cost);
  3056. /*
  3057. * In order to avoid avg_overlap growing stale when we are
  3058. * indeed overlapping and hence not getting put to sleep, grow
  3059. * the avg_overlap on preemption.
  3060. *
  3061. * We use the average preemption runtime because that
  3062. * correlates to the amount of cache footprint a task can
  3063. * build up.
  3064. */
  3065. update_avg(&prev->se.avg_overlap, runtime);
  3066. }
  3067. prev->sched_class->put_prev_task(rq, prev);
  3068. }
  3069. /*
  3070. * Pick up the highest-prio task:
  3071. */
  3072. static inline struct task_struct *
  3073. pick_next_task(struct rq *rq)
  3074. {
  3075. const struct sched_class *class;
  3076. struct task_struct *p;
  3077. /*
  3078. * Optimization: we know that if all tasks are in
  3079. * the fair class we can call that function directly:
  3080. */
  3081. if (likely(rq->nr_running == rq->cfs.nr_running)) {
  3082. p = fair_sched_class.pick_next_task(rq);
  3083. if (likely(p))
  3084. return p;
  3085. }
  3086. class = sched_class_highest;
  3087. for ( ; ; ) {
  3088. p = class->pick_next_task(rq);
  3089. if (p)
  3090. return p;
  3091. /*
  3092. * Will never be NULL as the idle class always
  3093. * returns a non-NULL p:
  3094. */
  3095. class = class->next;
  3096. }
  3097. }
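/*
 * In this kernel the class list walked above runs, from highest to
 * lowest, rt_sched_class -> fair_sched_class -> idle_sched_class, so a
 * runqueue holding one SCHED_FIFO task and ten SCHED_NORMAL tasks always
 * has the SCHED_FIFO task picked first, and the idle class only ever
 * supplies a task when nothing else is runnable.
 */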
  3098. /*
  3099. * schedule() is the main scheduler function.
  3100. */
  3101. asmlinkage void __sched schedule(void)
  3102. {
  3103. struct task_struct *prev, *next;
  3104. unsigned long *switch_count;
  3105. struct rq *rq;
  3106. int cpu;
  3107. need_resched:
  3108. preempt_disable();
  3109. cpu = smp_processor_id();
  3110. rq = cpu_rq(cpu);
  3111. rcu_sched_qs(cpu);
  3112. prev = rq->curr;
  3113. switch_count = &prev->nivcsw;
  3114. release_kernel_lock(prev);
  3115. need_resched_nonpreemptible:
  3116. schedule_debug(prev);
  3117. if (sched_feat(HRTICK))
  3118. hrtick_clear(rq);
  3119. raw_spin_lock_irq(&rq->lock);
  3120. update_rq_clock(rq);
  3121. clear_tsk_need_resched(prev);
  3122. if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
  3123. if (unlikely(signal_pending_state(prev->state, prev)))
  3124. prev->state = TASK_RUNNING;
  3125. else
  3126. deactivate_task(rq, prev, 1);
  3127. switch_count = &prev->nvcsw;
  3128. }
  3129. pre_schedule(rq, prev);
  3130. if (unlikely(!rq->nr_running))
  3131. idle_balance(cpu, rq);
  3132. put_prev_task(rq, prev);
  3133. next = pick_next_task(rq);
  3134. if (likely(prev != next)) {
  3135. sched_info_switch(prev, next);
  3136. perf_event_task_sched_out(prev, next, cpu);
  3137. rq->nr_switches++;
  3138. rq->curr = next;
  3139. ++*switch_count;
  3140. context_switch(rq, prev, next); /* unlocks the rq */
  3141. /*
  3142. * the context switch might have flipped the stack from under
  3143. * us, hence refresh the local variables.
  3144. */
  3145. cpu = smp_processor_id();
  3146. rq = cpu_rq(cpu);
  3147. } else
  3148. raw_spin_unlock_irq(&rq->lock);
  3149. post_schedule(rq);
  3150. if (unlikely(reacquire_kernel_lock(current) < 0)) {
  3151. prev = rq->curr;
  3152. switch_count = &prev->nivcsw;
  3153. goto need_resched_nonpreemptible;
  3154. }
  3155. preempt_enable_no_resched();
  3156. if (need_resched())
  3157. goto need_resched;
  3158. }
  3159. EXPORT_SYMBOL(schedule);
  3160. #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
  3161. /*
  3162. * Look out! "owner" is an entirely speculative pointer
  3163. * access and not reliable.
  3164. */
  3165. int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
  3166. {
  3167. unsigned int cpu;
  3168. struct rq *rq;
  3169. if (!sched_feat(OWNER_SPIN))
  3170. return 0;
  3171. #ifdef CONFIG_DEBUG_PAGEALLOC
  3172. /*
  3173. * Need to access the cpu field knowing that
  3174. * DEBUG_PAGEALLOC could have unmapped it if
  3175. * the mutex owner just released it and exited.
  3176. */
  3177. if (probe_kernel_address(&owner->cpu, cpu))
  3178. goto out;
  3179. #else
  3180. cpu = owner->cpu;
  3181. #endif
  3182. /*
  3183. * Even if the access succeeded (likely case),
  3184. * the cpu field may no longer be valid.
  3185. */
  3186. if (cpu >= nr_cpumask_bits)
  3187. goto out;
  3188. /*
  3189. * We need to validate that we can do a
  3190. * get_cpu() and that we have the percpu area.
  3191. */
  3192. if (!cpu_online(cpu))
  3193. goto out;
  3194. rq = cpu_rq(cpu);
  3195. for (;;) {
  3196. /*
  3197. * Owner changed, break to re-assess state.
  3198. */
  3199. if (lock->owner != owner)
  3200. break;
  3201. /*
  3202. * Is that owner really running on that cpu?
  3203. */
  3204. if (task_thread_info(rq->curr) != owner || need_resched())
  3205. return 0;
  3206. cpu_relax();
  3207. }
  3208. out:
  3209. return 1;
  3210. }
  3211. #endif
  3212. #ifdef CONFIG_PREEMPT
  3213. /*
3214. * this is the entry point to schedule() from in-kernel preemption
3215. * off of preempt_enable. Preemption off of the return-from-interrupt
3216. * path is handled separately by preempt_schedule_irq() below.
  3217. */
  3218. asmlinkage void __sched preempt_schedule(void)
  3219. {
  3220. struct thread_info *ti = current_thread_info();
  3221. /*
  3222. * If there is a non-zero preempt_count or interrupts are disabled,
  3223. * we do not want to preempt the current task. Just return..
  3224. */
  3225. if (likely(ti->preempt_count || irqs_disabled()))
  3226. return;
  3227. do {
  3228. add_preempt_count(PREEMPT_ACTIVE);
  3229. schedule();
  3230. sub_preempt_count(PREEMPT_ACTIVE);
  3231. /*
  3232. * Check again in case we missed a preemption opportunity
  3233. * between schedule and now.
  3234. */
  3235. barrier();
  3236. } while (need_resched());
  3237. }
  3238. EXPORT_SYMBOL(preempt_schedule);
  3239. /*
  3240. * this is the entry point to schedule() from kernel preemption
  3241. * off of irq context.
3242. * Note that this is called and returns with irqs disabled. This will
  3243. * protect us against recursive calling from irq.
  3244. */
  3245. asmlinkage void __sched preempt_schedule_irq(void)
  3246. {
  3247. struct thread_info *ti = current_thread_info();
  3248. /* Catch callers which need to be fixed */
  3249. BUG_ON(ti->preempt_count || !irqs_disabled());
  3250. do {
  3251. add_preempt_count(PREEMPT_ACTIVE);
  3252. local_irq_enable();
  3253. schedule();
  3254. local_irq_disable();
  3255. sub_preempt_count(PREEMPT_ACTIVE);
  3256. /*
  3257. * Check again in case we missed a preemption opportunity
  3258. * between schedule and now.
  3259. */
  3260. barrier();
  3261. } while (need_resched());
  3262. }
  3263. #endif /* CONFIG_PREEMPT */
  3264. int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags,
  3265. void *key)
  3266. {
  3267. return try_to_wake_up(curr->private, mode, wake_flags);
  3268. }
  3269. EXPORT_SYMBOL(default_wake_function);
  3270. /*
  3271. * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
  3272. * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
  3273. * number) then we wake all the non-exclusive tasks and one exclusive task.
  3274. *
  3275. * There are circumstances in which we can try to wake a task which has already
  3276. * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
  3277. * zero in this (rare) case, and we handle it by continuing to scan the queue.
  3278. */
  3279. static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
  3280. int nr_exclusive, int wake_flags, void *key)
  3281. {
  3282. wait_queue_t *curr, *next;
  3283. list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
  3284. unsigned flags = curr->flags;
  3285. if (curr->func(curr, mode, wake_flags, key) &&
  3286. (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
  3287. break;
  3288. }
  3289. }
  3290. /**
  3291. * __wake_up - wake up threads blocked on a waitqueue.
  3292. * @q: the waitqueue
  3293. * @mode: which threads
  3294. * @nr_exclusive: how many wake-one or wake-many threads to wake up
  3295. * @key: is directly passed to the wakeup function
  3296. *
  3297. * It may be assumed that this function implies a write memory barrier before
  3298. * changing the task state if and only if any tasks are woken up.
  3299. */
  3300. void __wake_up(wait_queue_head_t *q, unsigned int mode,
  3301. int nr_exclusive, void *key)
  3302. {
  3303. unsigned long flags;
  3304. spin_lock_irqsave(&q->lock, flags);
  3305. __wake_up_common(q, mode, nr_exclusive, 0, key);
  3306. spin_unlock_irqrestore(&q->lock, flags);
  3307. }
  3308. EXPORT_SYMBOL(__wake_up);
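/*
 * Typical usage sketch (not from this file): drivers pair these wakeup
 * primitives with the wait_event*() macros from <linux/wait.h>:
 *
 *	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
 *	static int my_cond;
 *
 *	waiting side:			waking side:
 *	wait_event(my_wq, my_cond);	my_cond = 1;
 *					wake_up(&my_wq);
 *
 * wake_up() expands to __wake_up(&my_wq, TASK_NORMAL, 1, NULL), i.e. it
 * wakes every non-exclusive waiter and at most one exclusive waiter.
 */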
  3309. /*
  3310. * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
  3311. */
  3312. void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
  3313. {
  3314. __wake_up_common(q, mode, 1, 0, NULL);
  3315. }
  3316. void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
  3317. {
  3318. __wake_up_common(q, mode, 1, 0, key);
  3319. }
  3320. /**
  3321. * __wake_up_sync_key - wake up threads blocked on a waitqueue.
  3322. * @q: the waitqueue
  3323. * @mode: which threads
  3324. * @nr_exclusive: how many wake-one or wake-many threads to wake up
  3325. * @key: opaque value to be passed to wakeup targets
  3326. *
3327. * The sync wakeup differs in that the waker knows that it will schedule
  3328. * away soon, so while the target thread will be woken up, it will not
  3329. * be migrated to another CPU - ie. the two threads are 'synchronized'
  3330. * with each other. This can prevent needless bouncing between CPUs.
  3331. *
  3332. * On UP it can prevent extra preemption.
  3333. *
  3334. * It may be assumed that this function implies a write memory barrier before
  3335. * changing the task state if and only if any tasks are woken up.
  3336. */
  3337. void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode,
  3338. int nr_exclusive, void *key)
  3339. {
  3340. unsigned long flags;
  3341. int wake_flags = WF_SYNC;
  3342. if (unlikely(!q))
  3343. return;
  3344. if (unlikely(!nr_exclusive))
  3345. wake_flags = 0;
  3346. spin_lock_irqsave(&q->lock, flags);
  3347. __wake_up_common(q, mode, nr_exclusive, wake_flags, key);
  3348. spin_unlock_irqrestore(&q->lock, flags);
  3349. }
  3350. EXPORT_SYMBOL_GPL(__wake_up_sync_key);
  3351. /*
  3352. * __wake_up_sync - see __wake_up_sync_key()
  3353. */
  3354. void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
  3355. {
  3356. __wake_up_sync_key(q, mode, nr_exclusive, NULL);
  3357. }
  3358. EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
  3359. /**
  3360. * complete: - signals a single thread waiting on this completion
  3361. * @x: holds the state of this particular completion
  3362. *
  3363. * This will wake up a single thread waiting on this completion. Threads will be
  3364. * awakened in the same order in which they were queued.
  3365. *
  3366. * See also complete_all(), wait_for_completion() and related routines.
  3367. *
  3368. * It may be assumed that this function implies a write memory barrier before
  3369. * changing the task state if and only if any tasks are woken up.
  3370. */
  3371. void complete(struct completion *x)
  3372. {
  3373. unsigned long flags;
  3374. spin_lock_irqsave(&x->wait.lock, flags);
  3375. x->done++;
  3376. __wake_up_common(&x->wait, TASK_NORMAL, 1, 0, NULL);
  3377. spin_unlock_irqrestore(&x->wait.lock, flags);
  3378. }
  3379. EXPORT_SYMBOL(complete);
  3380. /**
  3381. * complete_all: - signals all threads waiting on this completion
  3382. * @x: holds the state of this particular completion
  3383. *
  3384. * This will wake up all threads waiting on this particular completion event.
  3385. *
  3386. * It may be assumed that this function implies a write memory barrier before
  3387. * changing the task state if and only if any tasks are woken up.
  3388. */
  3389. void complete_all(struct completion *x)
  3390. {
  3391. unsigned long flags;
  3392. spin_lock_irqsave(&x->wait.lock, flags);
  3393. x->done += UINT_MAX/2;
  3394. __wake_up_common(&x->wait, TASK_NORMAL, 0, 0, NULL);
  3395. spin_unlock_irqrestore(&x->wait.lock, flags);
  3396. }
  3397. EXPORT_SYMBOL(complete_all);
  3398. static inline long __sched
  3399. do_wait_for_common(struct completion *x, long timeout, int state)
  3400. {
  3401. if (!x->done) {
  3402. DECLARE_WAITQUEUE(wait, current);
  3403. wait.flags |= WQ_FLAG_EXCLUSIVE;
  3404. __add_wait_queue_tail(&x->wait, &wait);
  3405. do {
  3406. if (signal_pending_state(state, current)) {
  3407. timeout = -ERESTARTSYS;
  3408. break;
  3409. }
  3410. __set_current_state(state);
  3411. spin_unlock_irq(&x->wait.lock);
  3412. timeout = schedule_timeout(timeout);
  3413. spin_lock_irq(&x->wait.lock);
  3414. } while (!x->done && timeout);
  3415. __remove_wait_queue(&x->wait, &wait);
  3416. if (!x->done)
  3417. return timeout;
  3418. }
  3419. x->done--;
  3420. return timeout ?: 1;
  3421. }
  3422. static long __sched
  3423. wait_for_common(struct completion *x, long timeout, int state)
  3424. {
  3425. might_sleep();
  3426. spin_lock_irq(&x->wait.lock);
  3427. timeout = do_wait_for_common(x, timeout, state);
  3428. spin_unlock_irq(&x->wait.lock);
  3429. return timeout;
  3430. }
  3431. /**
  3432. * wait_for_completion: - waits for completion of a task
  3433. * @x: holds the state of this particular completion
  3434. *
  3435. * This waits to be signaled for completion of a specific task. It is NOT
  3436. * interruptible and there is no timeout.
  3437. *
  3438. * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
  3439. * and interrupt capability. Also see complete().
  3440. */
  3441. void __sched wait_for_completion(struct completion *x)
  3442. {
  3443. wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
  3444. }
  3445. EXPORT_SYMBOL(wait_for_completion);
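/*
 * Typical usage sketch (not from this file): a completion pairs one
 * waiter with an event signalled from another context.  The helper name
 * below is hypothetical:
 *
 *	struct my_work {
 *		struct completion done;
 *	};
 *
 *	init_completion(&work->done);
 *	queue_the_work_somewhere(work);
 *	wait_for_completion(&work->done);	sleeps uninterruptibly
 *
 * and on the producing side, once the work is finished:
 *
 *	complete(&work->done);
 */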
  3446. /**
  3447. * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
  3448. * @x: holds the state of this particular completion
  3449. * @timeout: timeout value in jiffies
  3450. *
  3451. * This waits for either a completion of a specific task to be signaled or for a
  3452. * specified timeout to expire. The timeout is in jiffies. It is not
  3453. * interruptible.
  3454. */
  3455. unsigned long __sched
  3456. wait_for_completion_timeout(struct completion *x, unsigned long timeout)
  3457. {
  3458. return wait_for_common(x, timeout, TASK_UNINTERRUPTIBLE);
  3459. }
  3460. EXPORT_SYMBOL(wait_for_completion_timeout);
  3461. /**
  3462. * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
  3463. * @x: holds the state of this particular completion
  3464. *
  3465. * This waits for completion of a specific task to be signaled. It is
  3466. * interruptible.
  3467. */
  3468. int __sched wait_for_completion_interruptible(struct completion *x)
  3469. {
  3470. long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
  3471. if (t == -ERESTARTSYS)
  3472. return t;
  3473. return 0;
  3474. }
  3475. EXPORT_SYMBOL(wait_for_completion_interruptible);
  3476. /**
  3477. * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
  3478. * @x: holds the state of this particular completion
  3479. * @timeout: timeout value in jiffies
  3480. *
  3481. * This waits for either a completion of a specific task to be signaled or for a
  3482. * specified timeout to expire. It is interruptible. The timeout is in jiffies.
  3483. */
  3484. unsigned long __sched
  3485. wait_for_completion_interruptible_timeout(struct completion *x,
  3486. unsigned long timeout)
  3487. {
  3488. return wait_for_common(x, timeout, TASK_INTERRUPTIBLE);
  3489. }
  3490. EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
  3491. /**
  3492. * wait_for_completion_killable: - waits for completion of a task (killable)
  3493. * @x: holds the state of this particular completion
  3494. *
  3495. * This waits to be signaled for completion of a specific task. It can be
  3496. * interrupted by a kill signal.
  3497. */
  3498. int __sched wait_for_completion_killable(struct completion *x)
  3499. {
  3500. long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
  3501. if (t == -ERESTARTSYS)
  3502. return t;
  3503. return 0;
  3504. }
  3505. EXPORT_SYMBOL(wait_for_completion_killable);
  3506. /**
  3507. * try_wait_for_completion - try to decrement a completion without blocking
  3508. * @x: completion structure
  3509. *
  3510. * Returns: 0 if a decrement cannot be done without blocking
  3511. * 1 if a decrement succeeded.
  3512. *
  3513. * If a completion is being used as a counting completion,
  3514. * attempt to decrement the counter without blocking. This
  3515. * enables us to avoid waiting if the resource the completion
  3516. * is protecting is not available.
  3517. */
  3518. bool try_wait_for_completion(struct completion *x)
  3519. {
  3520. unsigned long flags;
  3521. int ret = 1;
  3522. spin_lock_irqsave(&x->wait.lock, flags);
  3523. if (!x->done)
  3524. ret = 0;
  3525. else
  3526. x->done--;
  3527. spin_unlock_irqrestore(&x->wait.lock, flags);
  3528. return ret;
  3529. }
  3530. EXPORT_SYMBOL(try_wait_for_completion);
  3531. /**
  3532. * completion_done - Test to see if a completion has any waiters
  3533. * @x: completion structure
  3534. *
  3535. * Returns: 0 if there are waiters (wait_for_completion() in progress)
  3536. * 1 if there are no waiters.
  3537. *
  3538. */
  3539. bool completion_done(struct completion *x)
  3540. {
  3541. unsigned long flags;
  3542. int ret = 1;
  3543. spin_lock_irqsave(&x->wait.lock, flags);
  3544. if (!x->done)
  3545. ret = 0;
  3546. spin_unlock_irqrestore(&x->wait.lock, flags);
  3547. return ret;
  3548. }
  3549. EXPORT_SYMBOL(completion_done);
  3550. static long __sched
  3551. sleep_on_common(wait_queue_head_t *q, int state, long timeout)
  3552. {
  3553. unsigned long flags;
  3554. wait_queue_t wait;
  3555. init_waitqueue_entry(&wait, current);
  3556. __set_current_state(state);
  3557. spin_lock_irqsave(&q->lock, flags);
  3558. __add_wait_queue(q, &wait);
  3559. spin_unlock(&q->lock);
  3560. timeout = schedule_timeout(timeout);
  3561. spin_lock_irq(&q->lock);
  3562. __remove_wait_queue(q, &wait);
  3563. spin_unlock_irqrestore(&q->lock, flags);
  3564. return timeout;
  3565. }
  3566. void __sched interruptible_sleep_on(wait_queue_head_t *q)
  3567. {
  3568. sleep_on_common(q, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
  3569. }
  3570. EXPORT_SYMBOL(interruptible_sleep_on);
  3571. long __sched
  3572. interruptible_sleep_on_timeout(wait_queue_head_t *q, long timeout)
  3573. {
  3574. return sleep_on_common(q, TASK_INTERRUPTIBLE, timeout);
  3575. }
  3576. EXPORT_SYMBOL(interruptible_sleep_on_timeout);
  3577. void __sched sleep_on(wait_queue_head_t *q)
  3578. {
  3579. sleep_on_common(q, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
  3580. }
  3581. EXPORT_SYMBOL(sleep_on);
  3582. long __sched sleep_on_timeout(wait_queue_head_t *q, long timeout)
  3583. {
  3584. return sleep_on_common(q, TASK_UNINTERRUPTIBLE, timeout);
  3585. }
  3586. EXPORT_SYMBOL(sleep_on_timeout);
  3587. #ifdef CONFIG_RT_MUTEXES
  3588. /*
  3589. * rt_mutex_setprio - set the current priority of a task
  3590. * @p: task
  3591. * @prio: prio value (kernel-internal form)
  3592. *
  3593. * This function changes the 'effective' priority of a task. It does
  3594. * not touch ->normal_prio like __setscheduler().
  3595. *
  3596. * Used by the rt_mutex code to implement priority inheritance logic.
  3597. */
  3598. void rt_mutex_setprio(struct task_struct *p, int prio)
  3599. {
  3600. unsigned long flags;
  3601. int oldprio, on_rq, running;
  3602. struct rq *rq;
  3603. const struct sched_class *prev_class = p->sched_class;
  3604. BUG_ON(prio < 0 || prio > MAX_PRIO);
  3605. rq = task_rq_lock(p, &flags);
  3606. update_rq_clock(rq);
  3607. oldprio = p->prio;
  3608. on_rq = p->se.on_rq;
  3609. running = task_current(rq, p);
  3610. if (on_rq)
  3611. dequeue_task(rq, p, 0);
  3612. if (running)
  3613. p->sched_class->put_prev_task(rq, p);
  3614. if (rt_prio(prio))
  3615. p->sched_class = &rt_sched_class;
  3616. else
  3617. p->sched_class = &fair_sched_class;
  3618. p->prio = prio;
  3619. if (running)
  3620. p->sched_class->set_curr_task(rq);
  3621. if (on_rq) {
  3622. enqueue_task(rq, p, 0, oldprio < prio);
  3623. check_class_changed(rq, p, prev_class, oldprio, running);
  3624. }
  3625. task_rq_unlock(rq, &flags);
  3626. }
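/*
 * Worked example (sketch): a SCHED_NORMAL task A (prio 120) holds an
 * rt_mutex when a SCHED_FIFO task B with rt_priority 50 (prio 49) blocks
 * on it.  The rt_mutex code then calls rt_mutex_setprio(A, 49): A is
 * dequeued, moved to &rt_sched_class, requeued at prio 49 and runs until
 * it releases the lock, at which point rt_mutex_setprio(A, 120) restores
 * its normal class and priority.
 */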
  3627. #endif
  3628. void set_user_nice(struct task_struct *p, long nice)
  3629. {
  3630. int old_prio, delta, on_rq;
  3631. unsigned long flags;
  3632. struct rq *rq;
  3633. if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
  3634. return;
  3635. /*
  3636. * We have to be careful, if called from sys_setpriority(),
  3637. * the task might be in the middle of scheduling on another CPU.
  3638. */
  3639. rq = task_rq_lock(p, &flags);
  3640. update_rq_clock(rq);
  3641. /*
  3642. * The RT priorities are set via sched_setscheduler(), but we still
  3643. * allow the 'normal' nice value to be set - but as expected
3644. * it won't have any effect on scheduling as long as the task
3645. * remains SCHED_FIFO/SCHED_RR:
  3646. */
  3647. if (task_has_rt_policy(p)) {
  3648. p->static_prio = NICE_TO_PRIO(nice);
  3649. goto out_unlock;
  3650. }
  3651. on_rq = p->se.on_rq;
  3652. if (on_rq)
  3653. dequeue_task(rq, p, 0);
  3654. p->static_prio = NICE_TO_PRIO(nice);
  3655. set_load_weight(p);
  3656. old_prio = p->prio;
  3657. p->prio = effective_prio(p);
  3658. delta = p->prio - old_prio;
  3659. if (on_rq) {
  3660. enqueue_task(rq, p, 0, false);
  3661. /*
  3662. * If the task increased its priority or is running and
  3663. * lowered its priority, then reschedule its CPU:
  3664. */
  3665. if (delta < 0 || (delta > 0 && task_running(rq, p)))
  3666. resched_task(rq->curr);
  3667. }
  3668. out_unlock:
  3669. task_rq_unlock(rq, &flags);
  3670. }
  3671. EXPORT_SYMBOL(set_user_nice);
  3672. /*
  3673. * can_nice - check if a task can reduce its nice value
  3674. * @p: task
  3675. * @nice: nice value
  3676. */
  3677. int can_nice(const struct task_struct *p, const int nice)
  3678. {
  3679. /* convert nice value [19,-20] to rlimit style value [1,40] */
  3680. int nice_rlim = 20 - nice;
  3681. return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
  3682. capable(CAP_SYS_NICE));
  3683. }
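/*
 * Worked example (sketch): a request for nice -10 becomes
 * nice_rlim = 20 - (-10) = 30, so an unprivileged task needs
 * RLIMIT_NICE >= 30 (or CAP_SYS_NICE) for the change to be allowed;
 * nice 19 maps to the minimum rlimit-style value 1, and nice -20 to
 * the maximum, 40.
 */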
  3684. #ifdef __ARCH_WANT_SYS_NICE
  3685. /*
  3686. * sys_nice - change the priority of the current process.
  3687. * @increment: priority increment
  3688. *
  3689. * sys_setpriority is a more generic, but much slower function that
  3690. * does similar things.
  3691. */
  3692. SYSCALL_DEFINE1(nice, int, increment)
  3693. {
  3694. long nice, retval;
  3695. /*
  3696. * Setpriority might change our priority at the same moment.
  3697. * We don't have to worry. Conceptually one call occurs first
  3698. * and we have a single winner.
  3699. */
  3700. if (increment < -40)
  3701. increment = -40;
  3702. if (increment > 40)
  3703. increment = 40;
  3704. nice = TASK_NICE(current) + increment;
  3705. if (nice < -20)
  3706. nice = -20;
  3707. if (nice > 19)
  3708. nice = 19;
  3709. if (increment < 0 && !can_nice(current, nice))
  3710. return -EPERM;
  3711. retval = security_task_setnice(current, nice);
  3712. if (retval)
  3713. return retval;
  3714. set_user_nice(current, nice);
  3715. return 0;
  3716. }
  3717. #endif
  3718. /**
  3719. * task_prio - return the priority value of a given task.
  3720. * @p: the task in question.
  3721. *
  3722. * This is the priority value as seen by users in /proc.
3723. * RT tasks are offset by -100, yielding values -100..-1. Normal
3724. * tasks map their nice value into the range 0..39 here.
  3725. */
  3726. int task_prio(const struct task_struct *p)
  3727. {
  3728. return p->prio - MAX_RT_PRIO;
  3729. }
  3730. /**
  3731. * task_nice - return the nice value of a given task.
  3732. * @p: the task in question.
  3733. */
  3734. int task_nice(const struct task_struct *p)
  3735. {
  3736. return TASK_NICE(p);
  3737. }
  3738. EXPORT_SYMBOL(task_nice);
  3739. /**
  3740. * idle_cpu - is a given cpu idle currently?
  3741. * @cpu: the processor in question.
  3742. */
  3743. int idle_cpu(int cpu)
  3744. {
  3745. return cpu_curr(cpu) == cpu_rq(cpu)->idle;
  3746. }
  3747. /**
  3748. * idle_task - return the idle task for a given cpu.
  3749. * @cpu: the processor in question.
  3750. */
  3751. struct task_struct *idle_task(int cpu)
  3752. {
  3753. return cpu_rq(cpu)->idle;
  3754. }
  3755. /**
  3756. * find_process_by_pid - find a process with a matching PID value.
  3757. * @pid: the pid in question.
  3758. */
  3759. static struct task_struct *find_process_by_pid(pid_t pid)
  3760. {
  3761. return pid ? find_task_by_vpid(pid) : current;
  3762. }
  3763. /* Actually do priority change: must hold rq lock. */
  3764. static void
  3765. __setscheduler(struct rq *rq, struct task_struct *p, int policy, int prio)
  3766. {
  3767. BUG_ON(p->se.on_rq);
  3768. p->policy = policy;
  3769. p->rt_priority = prio;
  3770. p->normal_prio = normal_prio(p);
  3771. /* we are holding p->pi_lock already */
  3772. p->prio = rt_mutex_getprio(p);
  3773. if (rt_prio(p->prio))
  3774. p->sched_class = &rt_sched_class;
  3775. else
  3776. p->sched_class = &fair_sched_class;
  3777. set_load_weight(p);
  3778. }
  3779. /*
  3780. * check the target process has a UID that matches the current process's
  3781. */
  3782. static bool check_same_owner(struct task_struct *p)
  3783. {
  3784. const struct cred *cred = current_cred(), *pcred;
  3785. bool match;
  3786. rcu_read_lock();
  3787. pcred = __task_cred(p);
  3788. match = (cred->euid == pcred->euid ||
  3789. cred->euid == pcred->uid);
  3790. rcu_read_unlock();
  3791. return match;
  3792. }
  3793. static int __sched_setscheduler(struct task_struct *p, int policy,
  3794. struct sched_param *param, bool user)
  3795. {
  3796. int retval, oldprio, oldpolicy = -1, on_rq, running;
  3797. unsigned long flags;
  3798. const struct sched_class *prev_class = p->sched_class;
  3799. struct rq *rq;
  3800. int reset_on_fork;
  3801. /* may grab non-irq protected spin_locks */
  3802. BUG_ON(in_interrupt());
  3803. recheck:
  3804. /* double check policy once rq lock held */
  3805. if (policy < 0) {
  3806. reset_on_fork = p->sched_reset_on_fork;
  3807. policy = oldpolicy = p->policy;
  3808. } else {
  3809. reset_on_fork = !!(policy & SCHED_RESET_ON_FORK);
  3810. policy &= ~SCHED_RESET_ON_FORK;
  3811. if (policy != SCHED_FIFO && policy != SCHED_RR &&
  3812. policy != SCHED_NORMAL && policy != SCHED_BATCH &&
  3813. policy != SCHED_IDLE)
  3814. return -EINVAL;
  3815. }
  3816. /*
  3817. * Valid priorities for SCHED_FIFO and SCHED_RR are
  3818. * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL,
  3819. * SCHED_BATCH and SCHED_IDLE is 0.
  3820. */
  3821. if (param->sched_priority < 0 ||
  3822. (p->mm && param->sched_priority > MAX_USER_RT_PRIO-1) ||
  3823. (!p->mm && param->sched_priority > MAX_RT_PRIO-1))
  3824. return -EINVAL;
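/*
* The RT policies require a non-zero priority; the normal policies
* require a zero one. Reject any other combination.
*/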
  3825. if (rt_policy(policy) != (param->sched_priority != 0))
  3826. return -EINVAL;
  3827. /*
  3828. * Allow unprivileged RT tasks to decrease priority:
  3829. */
  3830. if (user && !capable(CAP_SYS_NICE)) {
  3831. if (rt_policy(policy)) {
  3832. unsigned long rlim_rtprio;
  3833. if (!lock_task_sighand(p, &flags))
  3834. return -ESRCH;
  3835. rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
  3836. unlock_task_sighand(p, &flags);
  3837. /* can't set/change the rt policy */
  3838. if (policy != p->policy && !rlim_rtprio)
  3839. return -EPERM;
  3840. /* can't increase priority */
  3841. if (param->sched_priority > p->rt_priority &&
  3842. param->sched_priority > rlim_rtprio)
  3843. return -EPERM;
  3844. }
  3845. /*
* Like positive nice levels, don't allow tasks to
  3847. * move out of SCHED_IDLE either:
  3848. */
  3849. if (p->policy == SCHED_IDLE && policy != SCHED_IDLE)
  3850. return -EPERM;
  3851. /* can't change other user's priorities */
  3852. if (!check_same_owner(p))
  3853. return -EPERM;
  3854. /* Normal users shall not reset the sched_reset_on_fork flag */
  3855. if (p->sched_reset_on_fork && !reset_on_fork)
  3856. return -EPERM;
  3857. }
  3858. if (user) {
  3859. #ifdef CONFIG_RT_GROUP_SCHED
  3860. /*
  3861. * Do not allow realtime tasks into groups that have no runtime
  3862. * assigned.
  3863. */
  3864. if (rt_bandwidth_enabled() && rt_policy(policy) &&
  3865. task_group(p)->rt_bandwidth.rt_runtime == 0)
  3866. return -EPERM;
  3867. #endif
  3868. retval = security_task_setscheduler(p, policy, param);
  3869. if (retval)
  3870. return retval;
  3871. }
  3872. /*
  3873. * make sure no PI-waiters arrive (or leave) while we are
  3874. * changing the priority of the task:
  3875. */
  3876. raw_spin_lock_irqsave(&p->pi_lock, flags);
  3877. /*
* To be able to change p->policy safely, the appropriate
  3879. * runqueue lock must be held.
  3880. */
  3881. rq = __task_rq_lock(p);
  3882. /* recheck policy now with rq lock held */
  3883. if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
  3884. policy = oldpolicy = -1;
  3885. __task_rq_unlock(rq);
  3886. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  3887. goto recheck;
  3888. }
  3889. update_rq_clock(rq);
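/*
* Take the task off the runqueue (and stop treating it as the
* currently running task) while its scheduling attributes change;
* it is requeued with the new class/priority below.
*/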
  3890. on_rq = p->se.on_rq;
  3891. running = task_current(rq, p);
  3892. if (on_rq)
  3893. deactivate_task(rq, p, 0);
  3894. if (running)
  3895. p->sched_class->put_prev_task(rq, p);
  3896. p->sched_reset_on_fork = reset_on_fork;
  3897. oldprio = p->prio;
  3898. __setscheduler(rq, p, policy, param->sched_priority);
  3899. if (running)
  3900. p->sched_class->set_curr_task(rq);
  3901. if (on_rq) {
  3902. activate_task(rq, p, 0);
  3903. check_class_changed(rq, p, prev_class, oldprio, running);
  3904. }
  3905. __task_rq_unlock(rq);
  3906. raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  3907. rt_mutex_adjust_pi(p);
  3908. return 0;
  3909. }
  3910. /**
  3911. * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
  3912. * @p: the task in question.
  3913. * @policy: new policy.
  3914. * @param: structure containing the new RT priority.
  3915. *
* NOTE that the task may already be dead.
  3917. */
  3918. int sched_setscheduler(struct task_struct *p, int policy,
  3919. struct sched_param *param)
  3920. {
  3921. return __sched_setscheduler(p, policy, param, true);
  3922. }
  3923. EXPORT_SYMBOL_GPL(sched_setscheduler);
  3924. /**
  3925. * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
  3926. * @p: the task in question.
  3927. * @policy: new policy.
  3928. * @param: structure containing the new RT priority.
  3929. *
  3930. * Just like sched_setscheduler, only don't bother checking if the
  3931. * current context has permission. For example, this is needed in
  3932. * stop_machine(): we create temporary high priority worker threads,
  3933. * but our caller might not have that capability.
  3934. */
  3935. int sched_setscheduler_nocheck(struct task_struct *p, int policy,
  3936. struct sched_param *param)
  3937. {
  3938. return __sched_setscheduler(p, policy, param, false);
  3939. }
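/*
* A minimal sketch of such a kernel-internal call site (illustrative
* only, not taken from this file; 'worker' is a hypothetical kthread):
*
*	struct sched_param sp = { .sched_priority = MAX_RT_PRIO - 1 };
*
*	sched_setscheduler_nocheck(worker, SCHED_FIFO, &sp);
*/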
  3940. static int
  3941. do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
  3942. {
  3943. struct sched_param lparam;
  3944. struct task_struct *p;
  3945. int retval;
  3946. if (!param || pid < 0)
  3947. return -EINVAL;
  3948. if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
  3949. return -EFAULT;
  3950. rcu_read_lock();
  3951. retval = -ESRCH;
  3952. p = find_process_by_pid(pid);
  3953. if (p != NULL)
  3954. retval = sched_setscheduler(p, policy, &lparam);
  3955. rcu_read_unlock();
  3956. return retval;
  3957. }
  3958. /**
  3959. * sys_sched_setscheduler - set/change the scheduler policy and RT priority
  3960. * @pid: the pid in question.
  3961. * @policy: new policy.
  3962. * @param: structure containing the new RT priority.
  3963. */
  3964. SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy,
  3965. struct sched_param __user *, param)
  3966. {
  3967. /* negative values for policy are not valid */
  3968. if (policy < 0)
  3969. return -EINVAL;
  3970. return do_sched_setscheduler(pid, policy, param);
  3971. }
  3972. /**
  3973. * sys_sched_setparam - set/change the RT priority of a thread
  3974. * @pid: the pid in question.
  3975. * @param: structure containing the new RT priority.
  3976. */
  3977. SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
  3978. {
  3979. return do_sched_setscheduler(pid, -1, param);
  3980. }
  3981. /**
  3982. * sys_sched_getscheduler - get the policy (scheduling class) of a thread
  3983. * @pid: the pid in question.
  3984. */
  3985. SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
  3986. {
  3987. struct task_struct *p;
  3988. int retval;
  3989. if (pid < 0)
  3990. return -EINVAL;
  3991. retval = -ESRCH;
  3992. rcu_read_lock();
  3993. p = find_process_by_pid(pid);
  3994. if (p) {
  3995. retval = security_task_getscheduler(p);
  3996. if (!retval)
  3997. retval = p->policy
  3998. | (p->sched_reset_on_fork ? SCHED_RESET_ON_FORK : 0);
  3999. }
  4000. rcu_read_unlock();
  4001. return retval;
  4002. }
  4003. /**
  4004. * sys_sched_getparam - get the RT priority of a thread
  4005. * @pid: the pid in question.
  4006. * @param: structure containing the RT priority.
  4007. */
  4008. SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
  4009. {
  4010. struct sched_param lp;
  4011. struct task_struct *p;
  4012. int retval;
  4013. if (!param || pid < 0)
  4014. return -EINVAL;
  4015. rcu_read_lock();
  4016. p = find_process_by_pid(pid);
  4017. retval = -ESRCH;
  4018. if (!p)
  4019. goto out_unlock;
  4020. retval = security_task_getscheduler(p);
  4021. if (retval)
  4022. goto out_unlock;
  4023. lp.sched_priority = p->rt_priority;
  4024. rcu_read_unlock();
  4025. /*
* This one might sleep, so we cannot do it with a spinlock held ...
  4027. */
  4028. retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
  4029. return retval;
  4030. out_unlock:
  4031. rcu_read_unlock();
  4032. return retval;
  4033. }
  4034. long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
  4035. {
  4036. cpumask_var_t cpus_allowed, new_mask;
  4037. struct task_struct *p;
  4038. int retval;
  4039. get_online_cpus();
  4040. rcu_read_lock();
  4041. p = find_process_by_pid(pid);
  4042. if (!p) {
  4043. rcu_read_unlock();
  4044. put_online_cpus();
  4045. return -ESRCH;
  4046. }
  4047. /* Prevent p going away */
  4048. get_task_struct(p);
  4049. rcu_read_unlock();
  4050. if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
  4051. retval = -ENOMEM;
  4052. goto out_put_task;
  4053. }
  4054. if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
  4055. retval = -ENOMEM;
  4056. goto out_free_cpus_allowed;
  4057. }
  4058. retval = -EPERM;
  4059. if (!check_same_owner(p) && !capable(CAP_SYS_NICE))
  4060. goto out_unlock;
  4061. retval = security_task_setscheduler(p, 0, NULL);
  4062. if (retval)
  4063. goto out_unlock;
  4064. cpuset_cpus_allowed(p, cpus_allowed);
  4065. cpumask_and(new_mask, in_mask, cpus_allowed);
  4066. again:
  4067. retval = set_cpus_allowed_ptr(p, new_mask);
  4068. if (!retval) {
  4069. cpuset_cpus_allowed(p, cpus_allowed);
  4070. if (!cpumask_subset(new_mask, cpus_allowed)) {
  4071. /*
  4072. * We must have raced with a concurrent cpuset
  4073. * update. Just reset the cpus_allowed to the
  4074. * cpuset's cpus_allowed
  4075. */
  4076. cpumask_copy(new_mask, cpus_allowed);
  4077. goto again;
  4078. }
  4079. }
  4080. out_unlock:
  4081. free_cpumask_var(new_mask);
  4082. out_free_cpus_allowed:
  4083. free_cpumask_var(cpus_allowed);
  4084. out_put_task:
  4085. put_task_struct(p);
  4086. put_online_cpus();
  4087. return retval;
  4088. }
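/*
* Copy a user-supplied CPU bitmask into @new_mask. A short buffer is
* zero-padded (the mask is cleared first); an over-long one is
* truncated to the kernel's cpumask size.
*/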
  4089. static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
  4090. struct cpumask *new_mask)
  4091. {
  4092. if (len < cpumask_size())
  4093. cpumask_clear(new_mask);
  4094. else if (len > cpumask_size())
  4095. len = cpumask_size();
  4096. return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0;
  4097. }
  4098. /**
  4099. * sys_sched_setaffinity - set the cpu affinity of a process
  4100. * @pid: pid of the process
  4101. * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  4102. * @user_mask_ptr: user-space pointer to the new cpu mask
  4103. */
  4104. SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len,
  4105. unsigned long __user *, user_mask_ptr)
  4106. {
  4107. cpumask_var_t new_mask;
  4108. int retval;
  4109. if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
  4110. return -ENOMEM;
  4111. retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
  4112. if (retval == 0)
  4113. retval = sched_setaffinity(pid, new_mask);
  4114. free_cpumask_var(new_mask);
  4115. return retval;
  4116. }
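/*
* For reference, userspace normally reaches this syscall through the
* glibc wrapper; a rough, illustrative-only sketch of a caller:
*
*	#define _GNU_SOURCE
*	#include <sched.h>
*	#include <stdio.h>
*
*	cpu_set_t set;
*
*	CPU_ZERO(&set);
*	CPU_SET(2, &set);		// pin the calling thread to CPU 2
*	if (sched_setaffinity(0, sizeof(set), &set) == -1)
*		perror("sched_setaffinity");
*/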
  4117. long sched_getaffinity(pid_t pid, struct cpumask *mask)
  4118. {
  4119. struct task_struct *p;
  4120. unsigned long flags;
  4121. struct rq *rq;
  4122. int retval;
  4123. get_online_cpus();
  4124. rcu_read_lock();
  4125. retval = -ESRCH;
  4126. p = find_process_by_pid(pid);
  4127. if (!p)
  4128. goto out_unlock;
  4129. retval = security_task_getscheduler(p);
  4130. if (retval)
  4131. goto out_unlock;
  4132. rq = task_rq_lock(p, &flags);
  4133. cpumask_and(mask, &p->cpus_allowed, cpu_online_mask);
  4134. task_rq_unlock(rq, &flags);
  4135. out_unlock:
  4136. rcu_read_unlock();
  4137. put_online_cpus();
  4138. return retval;
  4139. }
  4140. /**
  4141. * sys_sched_getaffinity - get the cpu affinity of a process
  4142. * @pid: pid of the process
  4143. * @len: length in bytes of the bitmask pointed to by user_mask_ptr
  4144. * @user_mask_ptr: user-space pointer to hold the current cpu mask
  4145. */
  4146. SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
  4147. unsigned long __user *, user_mask_ptr)
  4148. {
  4149. int ret;
  4150. cpumask_var_t mask;
  4151. if (len < cpumask_size())
  4152. return -EINVAL;
  4153. if (!alloc_cpumask_var(&mask, GFP_KERNEL))
  4154. return -ENOMEM;
  4155. ret = sched_getaffinity(pid, mask);
  4156. if (ret == 0) {
  4157. if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
  4158. ret = -EFAULT;
  4159. else
  4160. ret = cpumask_size();
  4161. }
  4162. free_cpumask_var(mask);
  4163. return ret;
  4164. }
  4165. /**
  4166. * sys_sched_yield - yield the current processor to other threads.
  4167. *
  4168. * This function yields the current CPU to other tasks. If there are no
  4169. * other threads running on this CPU then this function will return.
  4170. */
  4171. SYSCALL_DEFINE0(sched_yield)
  4172. {
  4173. struct rq *rq = this_rq_lock();
  4174. schedstat_inc(rq, yld_count);
  4175. current->sched_class->yield_task(rq);
  4176. /*
  4177. * Since we are going to call schedule() anyway, there's
  4178. * no need to preempt or enable interrupts:
  4179. */
  4180. __release(rq->lock);
  4181. spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
  4182. do_raw_spin_unlock(&rq->lock);
  4183. preempt_enable_no_resched();
  4184. schedule();
  4185. return 0;
  4186. }
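/*
* A voluntary reschedule only makes sense if one is actually pending
* and we are not already inside a PREEMPT_ACTIVE section.
*/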
  4187. static inline int should_resched(void)
  4188. {
  4189. return need_resched() && !(preempt_count() & PREEMPT_ACTIVE);
  4190. }
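/*
* Call schedule() with PREEMPT_ACTIVE set so it is treated as a
* preemption: the task stays on the runqueue even if it has already
* set a non-running state, and recursion is blocked by should_resched().
*/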
  4191. static void __cond_resched(void)
  4192. {
  4193. add_preempt_count(PREEMPT_ACTIVE);
  4194. schedule();
  4195. sub_preempt_count(PREEMPT_ACTIVE);
  4196. }
  4197. int __sched _cond_resched(void)
  4198. {
  4199. if (should_resched()) {
  4200. __cond_resched();
  4201. return 1;
  4202. }
  4203. return 0;
  4204. }
  4205. EXPORT_SYMBOL(_cond_resched);
  4206. /*
  4207. * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
  4208. * call schedule, and on return reacquire the lock.
  4209. *
  4210. * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
  4211. * operations here to prevent schedule() from being called twice (once via
  4212. * spin_unlock(), once by hand).
  4213. */
  4214. int __cond_resched_lock(spinlock_t *lock)
  4215. {
  4216. int resched = should_resched();
  4217. int ret = 0;
  4218. lockdep_assert_held(lock);
  4219. if (spin_needbreak(lock) || resched) {
  4220. spin_unlock(lock);
  4221. if (resched)
  4222. __cond_resched();
  4223. else
  4224. cpu_relax();
  4225. ret = 1;
  4226. spin_lock(lock);
  4227. }
  4228. return ret;
  4229. }
  4230. EXPORT_SYMBOL(__cond_resched_lock);
  4231. int __sched __cond_resched_softirq(void)
  4232. {
  4233. BUG_ON(!in_softirq());
  4234. if (should_resched()) {
  4235. local_bh_enable();
  4236. __cond_resched();
  4237. local_bh_disable();
  4238. return 1;
  4239. }
  4240. return 0;
  4241. }
  4242. EXPORT_SYMBOL(__cond_resched_softirq);
  4243. /**
  4244. * yield - yield the current processor to other threads.
  4245. *
  4246. * This is a shortcut for kernel-space yielding - it marks the
  4247. * thread runnable and calls sys_sched_yield().
  4248. */
  4249. void __sched yield(void)
  4250. {
  4251. set_current_state(TASK_RUNNING);
  4252. sys_sched_yield();
  4253. }
  4254. EXPORT_SYMBOL(yield);
  4255. /*
  4256. * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  4257. * that process accounting knows that this is a task in IO wait state.
  4258. */
  4259. void __sched io_schedule(void)
  4260. {
  4261. struct rq *rq = raw_rq();
  4262. delayacct_blkio_start();
  4263. atomic_inc(&rq->nr_iowait);
  4264. current->in_iowait = 1;
  4265. schedule();
  4266. current->in_iowait = 0;
  4267. atomic_dec(&rq->nr_iowait);
  4268. delayacct_blkio_end();
  4269. }
  4270. EXPORT_SYMBOL(io_schedule);
  4271. long __sched io_schedule_timeout(long timeout)
  4272. {
  4273. struct rq *rq = raw_rq();
  4274. long ret;
  4275. delayacct_blkio_start();
  4276. atomic_inc(&rq->nr_iowait);
  4277. current->in_iowait = 1;
  4278. ret = schedule_timeout(timeout);
  4279. current->in_iowait = 0;
  4280. atomic_dec(&rq->nr_iowait);
  4281. delayacct_blkio_end();
  4282. return ret;
  4283. }
  4284. /**
  4285. * sys_sched_get_priority_max - return maximum RT priority.
  4286. * @policy: scheduling class.
  4287. *
  4288. * this syscall returns the maximum rt_priority that can be used
  4289. * by a given scheduling class.
  4290. */
  4291. SYSCALL_DEFINE1(sched_get_priority_max, int, policy)
  4292. {
  4293. int ret = -EINVAL;
  4294. switch (policy) {
  4295. case SCHED_FIFO:
  4296. case SCHED_RR:
  4297. ret = MAX_USER_RT_PRIO-1;
  4298. break;
  4299. case SCHED_NORMAL:
  4300. case SCHED_BATCH:
  4301. case SCHED_IDLE:
  4302. ret = 0;
  4303. break;
  4304. }
  4305. return ret;
  4306. }
  4307. /**
  4308. * sys_sched_get_priority_min - return minimum RT priority.
  4309. * @policy: scheduling class.
  4310. *
  4311. * this syscall returns the minimum rt_priority that can be used
  4312. * by a given scheduling class.
  4313. */
  4314. SYSCALL_DEFINE1(sched_get_priority_min, int, policy)
  4315. {
  4316. int ret = -EINVAL;
  4317. switch (policy) {
  4318. case SCHED_FIFO:
  4319. case SCHED_RR:
  4320. ret = 1;
  4321. break;
  4322. case SCHED_NORMAL:
  4323. case SCHED_BATCH:
  4324. case SCHED_IDLE:
  4325. ret = 0;
  4326. }
  4327. return ret;
  4328. }
  4329. /**
  4330. * sys_sched_rr_get_interval - return the default timeslice of a process.
  4331. * @pid: pid of the process.
  4332. * @interval: userspace pointer to the timeslice value.
  4333. *
  4334. * this syscall writes the default timeslice value of a given process
  4335. * into the user-space timespec buffer. A value of '0' means infinity.
  4336. */
  4337. SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid,
  4338. struct timespec __user *, interval)
  4339. {
  4340. struct task_struct *p;
  4341. unsigned int time_slice;
  4342. unsigned long flags;
  4343. struct rq *rq;
  4344. int retval;
  4345. struct timespec t;
  4346. if (pid < 0)
  4347. return -EINVAL;
  4348. retval = -ESRCH;
  4349. rcu_read_lock();
  4350. p = find_process_by_pid(pid);
  4351. if (!p)
  4352. goto out_unlock;
  4353. retval = security_task_getscheduler(p);
  4354. if (retval)
  4355. goto out_unlock;
  4356. rq = task_rq_lock(p, &flags);
  4357. time_slice = p->sched_class->get_rr_interval(rq, p);
  4358. task_rq_unlock(rq, &flags);
  4359. rcu_read_unlock();
  4360. jiffies_to_timespec(time_slice, &t);
  4361. retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
  4362. return retval;
  4363. out_unlock:
  4364. rcu_read_unlock();
  4365. return retval;
  4366. }
  4367. static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
  4368. void sched_show_task(struct task_struct *p)
  4369. {
  4370. unsigned long free = 0;
  4371. unsigned state;
  4372. state = p->state ? __ffs(p->state) + 1 : 0;
  4373. printk(KERN_INFO "%-13.13s %c", p->comm,
  4374. state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
  4375. #if BITS_PER_LONG == 32
  4376. if (state == TASK_RUNNING)
  4377. printk(KERN_CONT " running ");
  4378. else
  4379. printk(KERN_CONT " %08lx ", thread_saved_pc(p));
  4380. #else
  4381. if (state == TASK_RUNNING)
  4382. printk(KERN_CONT " running task ");
  4383. else
  4384. printk(KERN_CONT " %016lx ", thread_saved_pc(p));
  4385. #endif
  4386. #ifdef CONFIG_DEBUG_STACK_USAGE
  4387. free = stack_not_used(p);
  4388. #endif
  4389. printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
  4390. task_pid_nr(p), task_pid_nr(p->real_parent),
  4391. (unsigned long)task_thread_info(p)->flags);
  4392. show_stack(p, NULL);
  4393. }
  4394. void show_state_filter(unsigned long state_filter)
  4395. {
  4396. struct task_struct *g, *p;
  4397. #if BITS_PER_LONG == 32
printk(KERN_INFO
"  task                PC stack   pid father\n");
#else
printk(KERN_INFO
"  task                        PC stack   pid father\n");
  4403. #endif
  4404. read_lock(&tasklist_lock);
  4405. do_each_thread(g, p) {
  4406. /*
* reset the NMI-timeout, listing all tasks on a slow
* console might take a lot of time:
  4409. */
  4410. touch_nmi_watchdog();
  4411. if (!state_filter || (p->state & state_filter))
  4412. sched_show_task(p);
  4413. } while_each_thread(g, p);
  4414. touch_all_softlockup_watchdogs();
  4415. #ifdef CONFIG_SCHED_DEBUG
  4416. sysrq_sched_debug_show();
  4417. #endif
  4418. read_unlock(&tasklist_lock);
  4419. /*
  4420. * Only show locks if all tasks are dumped:
  4421. */
  4422. if (!state_filter)
  4423. debug_show_all_locks();
  4424. }
  4425. void __cpuinit init_idle_bootup_task(struct task_struct *idle)
  4426. {
  4427. idle->sched_class = &idle_sched_class;
  4428. }
  4429. /**
  4430. * init_idle - set up an idle thread for a given CPU
  4431. * @idle: task in question
  4432. * @cpu: cpu the idle task belongs to
  4433. *
  4434. * NOTE: this function does not set the idle thread's NEED_RESCHED
  4435. * flag, to make booting more robust.
  4436. */
  4437. void __cpuinit init_idle(struct task_struct *idle, int cpu)
  4438. {
  4439. struct rq *rq = cpu_rq(cpu);
  4440. unsigned long flags;
  4441. raw_spin_lock_irqsave(&rq->lock, flags);
  4442. __sched_fork(idle);
  4443. idle->state = TASK_RUNNING;
  4444. idle->se.exec_start = sched_clock();
  4445. cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
  4446. __set_task_cpu(idle, cpu);
  4447. rq->curr = rq->idle = idle;
  4448. #if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
  4449. idle->oncpu = 1;
  4450. #endif
  4451. raw_spin_unlock_irqrestore(&rq->lock, flags);
  4452. /* Set the preempt count _outside_ the spinlocks! */
  4453. #if defined(CONFIG_PREEMPT)
  4454. task_thread_info(idle)->preempt_count = (idle->lock_depth >= 0);
  4455. #else
  4456. task_thread_info(idle)->preempt_count = 0;
  4457. #endif
  4458. /*
  4459. * The idle tasks have their own, simple scheduling class:
  4460. */
  4461. idle->sched_class = &idle_sched_class;
  4462. ftrace_graph_init_task(idle);
  4463. }
  4464. /*
  4465. * In a system that switches off the HZ timer nohz_cpu_mask
  4466. * indicates which cpus entered this state. This is used
* in the RCU update to wait only for active cpus. For systems
* which do not switch off the HZ timer nohz_cpu_mask should
  4469. * always be CPU_BITS_NONE.
  4470. */
  4471. cpumask_var_t nohz_cpu_mask;
  4472. /*
  4473. * Increase the granularity value when there are more CPUs,
  4474. * because with more CPUs the 'effective latency' as visible
  4475. * to users decreases. But the relationship is not linear,
  4476. * so pick a second-best guess by going with the log2 of the
  4477. * number of CPUs.
  4478. *
  4479. * This idea comes from the SD scheduler of Con Kolivas:
  4480. */
  4481. static int get_update_sysctl_factor(void)
  4482. {
  4483. unsigned int cpus = min_t(int, num_online_cpus(), 8);
  4484. unsigned int factor;
  4485. switch (sysctl_sched_tunable_scaling) {
  4486. case SCHED_TUNABLESCALING_NONE:
  4487. factor = 1;
  4488. break;
  4489. case SCHED_TUNABLESCALING_LINEAR:
  4490. factor = cpus;
  4491. break;
  4492. case SCHED_TUNABLESCALING_LOG:
  4493. default:
  4494. factor = 1 + ilog2(cpus);
  4495. break;
  4496. }
  4497. return factor;
  4498. }
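/*
* Rescale the scheduler tunables from their normalized, single-CPU
* values by the factor computed above.
*/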
  4499. static void update_sysctl(void)
  4500. {
  4501. unsigned int factor = get_update_sysctl_factor();
  4502. #define SET_SYSCTL(name) \
  4503. (sysctl_##name = (factor) * normalized_sysctl_##name)
  4504. SET_SYSCTL(sched_min_granularity);
  4505. SET_SYSCTL(sched_latency);
  4506. SET_SYSCTL(sched_wakeup_granularity);
  4507. SET_SYSCTL(sched_shares_ratelimit);
  4508. #undef SET_SYSCTL
  4509. }
  4510. static inline void sched_init_granularity(void)
  4511. {
  4512. update_sysctl();
  4513. }
  4514. #ifdef CONFIG_SMP
  4515. /*
  4516. * This is how migration works:
  4517. *
  4518. * 1) we queue a struct migration_req structure in the source CPU's
  4519. * runqueue and wake up that CPU's migration thread.
  4520. * 2) we down() the locked semaphore => thread blocks.
  4521. * 3) migration thread wakes up (implicitly it forces the migrated
  4522. * thread off the CPU)
  4523. * 4) it gets the migration request and checks whether the migrated
  4524. * task is still in the wrong runqueue.
  4525. * 5) if it's in the wrong runqueue then the migration thread removes
  4526. * it and puts it into the right queue.
  4527. * 6) migration thread up()s the semaphore.
  4528. * 7) we wake up and the migration is done.
  4529. */
  4530. /*
  4531. * Change a given task's CPU affinity. Migrate the thread to a
  4532. * proper CPU and schedule it away if the CPU it's executing on
  4533. * is removed from the allowed bitmask.
  4534. *
  4535. * NOTE: the caller must have a valid reference to the task, the
  4536. * task must not exit() & deallocate itself prematurely. The
  4537. * call is not atomic; no spinlocks may be held.
  4538. */
  4539. int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
  4540. {
  4541. struct migration_req req;
  4542. unsigned long flags;
  4543. struct rq *rq;
  4544. int ret = 0;
  4545. rq = task_rq_lock(p, &flags);
  4546. if (!cpumask_intersects(new_mask, cpu_active_mask)) {
  4547. ret = -EINVAL;
  4548. goto out;
  4549. }
  4550. if (unlikely((p->flags & PF_THREAD_BOUND) && p != current &&
  4551. !cpumask_equal(&p->cpus_allowed, new_mask))) {
  4552. ret = -EINVAL;
  4553. goto out;
  4554. }
  4555. if (p->sched_class->set_cpus_allowed)
  4556. p->sched_class->set_cpus_allowed(p, new_mask);
  4557. else {
  4558. cpumask_copy(&p->cpus_allowed, new_mask);
  4559. p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
  4560. }
  4561. /* Can the task run on the task's current CPU? If so, we're done */
  4562. if (cpumask_test_cpu(task_cpu(p), new_mask))
  4563. goto out;
  4564. if (migrate_task(p, cpumask_any_and(cpu_active_mask, new_mask), &req)) {
  4565. /* Need help from migration thread: drop lock and wait. */
  4566. struct task_struct *mt = rq->migration_thread;
  4567. get_task_struct(mt);
  4568. task_rq_unlock(rq, &flags);
  4569. wake_up_process(rq->migration_thread);
  4570. put_task_struct(mt);
  4571. wait_for_completion(&req.done);
  4572. tlb_migrate_finish(p->mm);
  4573. return 0;
  4574. }
  4575. out:
  4576. task_rq_unlock(rq, &flags);
  4577. return ret;
  4578. }
  4579. EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
  4580. /*
* Move a (non-current) task off this cpu, onto the dest cpu. We're doing
  4582. * this because either it can't run here any more (set_cpus_allowed()
  4583. * away from this CPU, or CPU going down), or because we're
  4584. * attempting to rebalance this task on exec (sched_exec).
  4585. *
  4586. * So we race with normal scheduler movements, but that's OK, as long
  4587. * as the task is no longer on this CPU.
  4588. *
  4589. * Returns non-zero if task was successfully migrated.
  4590. */
  4591. static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
  4592. {
  4593. struct rq *rq_dest, *rq_src;
  4594. int ret = 0;
  4595. if (unlikely(!cpu_active(dest_cpu)))
  4596. return ret;
  4597. rq_src = cpu_rq(src_cpu);
  4598. rq_dest = cpu_rq(dest_cpu);
  4599. double_rq_lock(rq_src, rq_dest);
  4600. /* Already moved. */
  4601. if (task_cpu(p) != src_cpu)
  4602. goto done;
  4603. /* Affinity changed (again). */
  4604. if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
  4605. goto fail;
  4606. /*
  4607. * If we're not on a rq, the next wake-up will ensure we're
  4608. * placed properly.
  4609. */
  4610. if (p->se.on_rq) {
  4611. deactivate_task(rq_src, p, 0);
  4612. set_task_cpu(p, dest_cpu);
  4613. activate_task(rq_dest, p, 0);
  4614. check_preempt_curr(rq_dest, p, 0);
  4615. }
  4616. done:
  4617. ret = 1;
  4618. fail:
  4619. double_rq_unlock(rq_src, rq_dest);
  4620. return ret;
  4621. }
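/*
* Values passed via migration_req->dest_cpu when req->task is NULL:
* they let synchronize_sched_expedited() learn from the migration
* thread whether a quiescent state was reached on the expected CPU
* or whether a full synchronization is still needed.
*/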
  4622. #define RCU_MIGRATION_IDLE 0
  4623. #define RCU_MIGRATION_NEED_QS 1
  4624. #define RCU_MIGRATION_GOT_QS 2
  4625. #define RCU_MIGRATION_MUST_SYNC 3
  4626. /*
* migration_thread - this is a high-priority system thread that performs
* thread migration by bumping the thread off its CPU then 'pushing' it onto
  4629. * another runqueue.
  4630. */
  4631. static int migration_thread(void *data)
  4632. {
  4633. int badcpu;
  4634. int cpu = (long)data;
  4635. struct rq *rq;
  4636. rq = cpu_rq(cpu);
  4637. BUG_ON(rq->migration_thread != current);
  4638. set_current_state(TASK_INTERRUPTIBLE);
  4639. while (!kthread_should_stop()) {
  4640. struct migration_req *req;
  4641. struct list_head *head;
  4642. raw_spin_lock_irq(&rq->lock);
  4643. if (cpu_is_offline(cpu)) {
  4644. raw_spin_unlock_irq(&rq->lock);
  4645. break;
  4646. }
  4647. if (rq->active_balance) {
  4648. active_load_balance(rq, cpu);
  4649. rq->active_balance = 0;
  4650. }
  4651. head = &rq->migration_queue;
  4652. if (list_empty(head)) {
  4653. raw_spin_unlock_irq(&rq->lock);
  4654. schedule();
  4655. set_current_state(TASK_INTERRUPTIBLE);
  4656. continue;
  4657. }
  4658. req = list_entry(head->next, struct migration_req, list);
  4659. list_del_init(head->next);
  4660. if (req->task != NULL) {
  4661. raw_spin_unlock(&rq->lock);
  4662. __migrate_task(req->task, cpu, req->dest_cpu);
  4663. } else if (likely(cpu == (badcpu = smp_processor_id()))) {
  4664. req->dest_cpu = RCU_MIGRATION_GOT_QS;
  4665. raw_spin_unlock(&rq->lock);
  4666. } else {
  4667. req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
  4668. raw_spin_unlock(&rq->lock);
  4669. WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
  4670. }
  4671. local_irq_enable();
  4672. complete(&req->done);
  4673. }
  4674. __set_current_state(TASK_RUNNING);
  4675. return 0;
  4676. }
  4677. #ifdef CONFIG_HOTPLUG_CPU
  4678. static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
  4679. {
  4680. int ret;
  4681. local_irq_disable();
  4682. ret = __migrate_task(p, src_cpu, dest_cpu);
  4683. local_irq_enable();
  4684. return ret;
  4685. }
  4686. /*
  4687. * Figure out where task on dead CPU should go, use force if necessary.
  4688. */
  4689. static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
  4690. {
  4691. int dest_cpu;
  4692. again:
  4693. dest_cpu = select_fallback_rq(dead_cpu, p);
  4694. /* It can have affinity changed while we were choosing. */
  4695. if (unlikely(!__migrate_task_irq(p, dead_cpu, dest_cpu)))
  4696. goto again;
  4697. }
  4698. /*
  4699. * While a dead CPU has no uninterruptible tasks queued at this point,
  4700. * it might still have a nonzero ->nr_uninterruptible counter, because
* for performance reasons the counter is not strictly tracking tasks to
  4702. * their home CPUs. So we just add the counter to another CPU's counter,
  4703. * to keep the global sum constant after CPU-down:
  4704. */
  4705. static void migrate_nr_uninterruptible(struct rq *rq_src)
  4706. {
  4707. struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
  4708. unsigned long flags;
  4709. local_irq_save(flags);
  4710. double_rq_lock(rq_src, rq_dest);
  4711. rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
  4712. rq_src->nr_uninterruptible = 0;
  4713. double_rq_unlock(rq_src, rq_dest);
  4714. local_irq_restore(flags);
  4715. }
  4716. /* Run through task list and migrate tasks from the dead cpu. */
  4717. static void migrate_live_tasks(int src_cpu)
  4718. {
  4719. struct task_struct *p, *t;
  4720. read_lock(&tasklist_lock);
  4721. do_each_thread(t, p) {
  4722. if (p == current)
  4723. continue;
  4724. if (task_cpu(p) == src_cpu)
  4725. move_task_off_dead_cpu(src_cpu, p);
  4726. } while_each_thread(t, p);
  4727. read_unlock(&tasklist_lock);
  4728. }
  4729. /*
* Schedules the idle task to be the next runnable task on the current CPU.
* It does so by boosting its priority to the highest possible.
  4732. * Used by CPU offline code.
  4733. */
  4734. void sched_idle_next(void)
  4735. {
  4736. int this_cpu = smp_processor_id();
  4737. struct rq *rq = cpu_rq(this_cpu);
  4738. struct task_struct *p = rq->idle;
  4739. unsigned long flags;
  4740. /* cpu has to be offline */
  4741. BUG_ON(cpu_online(this_cpu));
  4742. /*
  4743. * Strictly not necessary since rest of the CPUs are stopped by now
  4744. * and interrupts disabled on the current cpu.
  4745. */
  4746. raw_spin_lock_irqsave(&rq->lock, flags);
  4747. __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
  4748. update_rq_clock(rq);
  4749. activate_task(rq, p, 0);
  4750. raw_spin_unlock_irqrestore(&rq->lock, flags);
  4751. }
  4752. /*
  4753. * Ensures that the idle task is using init_mm right before its cpu goes
  4754. * offline.
  4755. */
  4756. void idle_task_exit(void)
  4757. {
  4758. struct mm_struct *mm = current->active_mm;
  4759. BUG_ON(cpu_online(smp_processor_id()));
  4760. if (mm != &init_mm)
  4761. switch_mm(mm, &init_mm, current);
  4762. mmdrop(mm);
  4763. }
  4764. /* called under rq->lock with disabled interrupts */
  4765. static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
  4766. {
  4767. struct rq *rq = cpu_rq(dead_cpu);
  4768. /* Must be exiting, otherwise would be on tasklist. */
  4769. BUG_ON(!p->exit_state);
  4770. /* Cannot have done final schedule yet: would have vanished. */
  4771. BUG_ON(p->state == TASK_DEAD);
  4772. get_task_struct(p);
  4773. /*
  4774. * Drop lock around migration; if someone else moves it,
  4775. * that's OK. No task can be added to this CPU, so iteration is
  4776. * fine.
  4777. */
  4778. raw_spin_unlock_irq(&rq->lock);
  4779. move_task_off_dead_cpu(dead_cpu, p);
  4780. raw_spin_lock_irq(&rq->lock);
  4781. put_task_struct(p);
  4782. }
  4783. /* release_task() removes task from tasklist, so we won't find dead tasks. */
  4784. static void migrate_dead_tasks(unsigned int dead_cpu)
  4785. {
  4786. struct rq *rq = cpu_rq(dead_cpu);
  4787. struct task_struct *next;
  4788. for ( ; ; ) {
  4789. if (!rq->nr_running)
  4790. break;
  4791. update_rq_clock(rq);
  4792. next = pick_next_task(rq);
  4793. if (!next)
  4794. break;
  4795. next->sched_class->put_prev_task(rq, next);
  4796. migrate_dead(dead_cpu, next);
  4797. }
  4798. }
  4799. /*
  4800. * remove the tasks which were accounted by rq from calc_load_tasks.
  4801. */
  4802. static void calc_global_load_remove(struct rq *rq)
  4803. {
  4804. atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
  4805. rq->calc_load_active = 0;
  4806. }
  4807. #endif /* CONFIG_HOTPLUG_CPU */
  4808. #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
  4809. static struct ctl_table sd_ctl_dir[] = {
  4810. {
  4811. .procname = "sched_domain",
  4812. .mode = 0555,
  4813. },
  4814. {}
  4815. };
  4816. static struct ctl_table sd_ctl_root[] = {
  4817. {
  4818. .procname = "kernel",
  4819. .mode = 0555,
  4820. .child = sd_ctl_dir,
  4821. },
  4822. {}
  4823. };
  4824. static struct ctl_table *sd_alloc_ctl_entry(int n)
  4825. {
  4826. struct ctl_table *entry =
  4827. kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
  4828. return entry;
  4829. }
  4830. static void sd_free_ctl_entry(struct ctl_table **tablep)
  4831. {
  4832. struct ctl_table *entry;
  4833. /*
  4834. * In the intermediate directories, both the child directory and
  4835. * procname are dynamically allocated and could fail but the mode
  4836. * will always be set. In the lowest directory the names are
  4837. * static strings and all have proc handlers.
  4838. */
  4839. for (entry = *tablep; entry->mode; entry++) {
  4840. if (entry->child)
  4841. sd_free_ctl_entry(&entry->child);
  4842. if (entry->proc_handler == NULL)
  4843. kfree(entry->procname);
  4844. }
  4845. kfree(*tablep);
  4846. *tablep = NULL;
  4847. }
  4848. static void
  4849. set_table_entry(struct ctl_table *entry,
  4850. const char *procname, void *data, int maxlen,
  4851. mode_t mode, proc_handler *proc_handler)
  4852. {
  4853. entry->procname = procname;
  4854. entry->data = data;
  4855. entry->maxlen = maxlen;
  4856. entry->mode = mode;
  4857. entry->proc_handler = proc_handler;
  4858. }
  4859. static struct ctl_table *
  4860. sd_alloc_ctl_domain_table(struct sched_domain *sd)
  4861. {
  4862. struct ctl_table *table = sd_alloc_ctl_entry(13);
  4863. if (table == NULL)
  4864. return NULL;
  4865. set_table_entry(&table[0], "min_interval", &sd->min_interval,
  4866. sizeof(long), 0644, proc_doulongvec_minmax);
  4867. set_table_entry(&table[1], "max_interval", &sd->max_interval,
  4868. sizeof(long), 0644, proc_doulongvec_minmax);
  4869. set_table_entry(&table[2], "busy_idx", &sd->busy_idx,
  4870. sizeof(int), 0644, proc_dointvec_minmax);
  4871. set_table_entry(&table[3], "idle_idx", &sd->idle_idx,
  4872. sizeof(int), 0644, proc_dointvec_minmax);
  4873. set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx,
  4874. sizeof(int), 0644, proc_dointvec_minmax);
  4875. set_table_entry(&table[5], "wake_idx", &sd->wake_idx,
  4876. sizeof(int), 0644, proc_dointvec_minmax);
  4877. set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx,
  4878. sizeof(int), 0644, proc_dointvec_minmax);
  4879. set_table_entry(&table[7], "busy_factor", &sd->busy_factor,
  4880. sizeof(int), 0644, proc_dointvec_minmax);
  4881. set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct,
  4882. sizeof(int), 0644, proc_dointvec_minmax);
  4883. set_table_entry(&table[9], "cache_nice_tries",
  4884. &sd->cache_nice_tries,
  4885. sizeof(int), 0644, proc_dointvec_minmax);
  4886. set_table_entry(&table[10], "flags", &sd->flags,
  4887. sizeof(int), 0644, proc_dointvec_minmax);
  4888. set_table_entry(&table[11], "name", sd->name,
  4889. CORENAME_MAX_SIZE, 0444, proc_dostring);
  4890. /* &table[12] is terminator */
  4891. return table;
  4892. }
  4893. static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
  4894. {
  4895. struct ctl_table *entry, *table;
  4896. struct sched_domain *sd;
  4897. int domain_num = 0, i;
  4898. char buf[32];
  4899. for_each_domain(cpu, sd)
  4900. domain_num++;
  4901. entry = table = sd_alloc_ctl_entry(domain_num + 1);
  4902. if (table == NULL)
  4903. return NULL;
  4904. i = 0;
  4905. for_each_domain(cpu, sd) {
  4906. snprintf(buf, 32, "domain%d", i);
  4907. entry->procname = kstrdup(buf, GFP_KERNEL);
  4908. entry->mode = 0555;
  4909. entry->child = sd_alloc_ctl_domain_table(sd);
  4910. entry++;
  4911. i++;
  4912. }
  4913. return table;
  4914. }
  4915. static struct ctl_table_header *sd_sysctl_header;
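/*
* Build and register the /proc/sys/kernel/sched_domain/cpuN/domainM
* tree that exposes each CPU's sched-domain tunables.
*/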
  4916. static void register_sched_domain_sysctl(void)
  4917. {
  4918. int i, cpu_num = num_possible_cpus();
  4919. struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
  4920. char buf[32];
  4921. WARN_ON(sd_ctl_dir[0].child);
  4922. sd_ctl_dir[0].child = entry;
  4923. if (entry == NULL)
  4924. return;
  4925. for_each_possible_cpu(i) {
  4926. snprintf(buf, 32, "cpu%d", i);
  4927. entry->procname = kstrdup(buf, GFP_KERNEL);
  4928. entry->mode = 0555;
  4929. entry->child = sd_alloc_ctl_cpu_table(i);
  4930. entry++;
  4931. }
  4932. WARN_ON(sd_sysctl_header);
  4933. sd_sysctl_header = register_sysctl_table(sd_ctl_root);
  4934. }
  4935. /* may be called multiple times per register */
  4936. static void unregister_sched_domain_sysctl(void)
  4937. {
  4938. if (sd_sysctl_header)
  4939. unregister_sysctl_table(sd_sysctl_header);
  4940. sd_sysctl_header = NULL;
  4941. if (sd_ctl_dir[0].child)
  4942. sd_free_ctl_entry(&sd_ctl_dir[0].child);
  4943. }
  4944. #else
  4945. static void register_sched_domain_sysctl(void)
  4946. {
  4947. }
  4948. static void unregister_sched_domain_sysctl(void)
  4949. {
  4950. }
  4951. #endif
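/*
* Mark the runqueue's CPU as online in its root domain and give every
* scheduling class a chance to react via its rq_online hook.
*/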
  4952. static void set_rq_online(struct rq *rq)
  4953. {
  4954. if (!rq->online) {
  4955. const struct sched_class *class;
  4956. cpumask_set_cpu(rq->cpu, rq->rd->online);
  4957. rq->online = 1;
  4958. for_each_class(class) {
  4959. if (class->rq_online)
  4960. class->rq_online(rq);
  4961. }
  4962. }
  4963. }
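/*
* The reverse of set_rq_online(): run the per-class rq_offline hooks,
* then clear the CPU from its root domain's online mask.
*/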
  4964. static void set_rq_offline(struct rq *rq)
  4965. {
  4966. if (rq->online) {
  4967. const struct sched_class *class;
  4968. for_each_class(class) {
  4969. if (class->rq_offline)
  4970. class->rq_offline(rq);
  4971. }
  4972. cpumask_clear_cpu(rq->cpu, rq->rd->online);
  4973. rq->online = 0;
  4974. }
  4975. }
  4976. /*
  4977. * migration_call - callback that gets triggered when a CPU is added.
  4978. * Here we can start up the necessary migration thread for the new CPU.
  4979. */
  4980. static int __cpuinit
  4981. migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  4982. {
  4983. struct task_struct *p;
  4984. int cpu = (long)hcpu;
  4985. unsigned long flags;
  4986. struct rq *rq;
  4987. switch (action) {
  4988. case CPU_UP_PREPARE:
  4989. case CPU_UP_PREPARE_FROZEN:
  4990. p = kthread_create(migration_thread, hcpu, "migration/%d", cpu);
  4991. if (IS_ERR(p))
  4992. return NOTIFY_BAD;
  4993. kthread_bind(p, cpu);
  4994. /* Must be high prio: stop_machine expects to yield to it. */
  4995. rq = task_rq_lock(p, &flags);
  4996. __setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
  4997. task_rq_unlock(rq, &flags);
  4998. get_task_struct(p);
  4999. cpu_rq(cpu)->migration_thread = p;
  5000. rq->calc_load_update = calc_load_update;
  5001. break;
  5002. case CPU_ONLINE:
  5003. case CPU_ONLINE_FROZEN:
  5004. /* Strictly unnecessary, as first user will wake it. */
  5005. wake_up_process(cpu_rq(cpu)->migration_thread);
  5006. /* Update our root-domain */
  5007. rq = cpu_rq(cpu);
  5008. raw_spin_lock_irqsave(&rq->lock, flags);
  5009. if (rq->rd) {
  5010. BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
  5011. set_rq_online(rq);
  5012. }
  5013. raw_spin_unlock_irqrestore(&rq->lock, flags);
  5014. break;
  5015. #ifdef CONFIG_HOTPLUG_CPU
  5016. case CPU_UP_CANCELED:
  5017. case CPU_UP_CANCELED_FROZEN:
  5018. if (!cpu_rq(cpu)->migration_thread)
  5019. break;
  5020. /* Unbind it from offline cpu so it can run. Fall thru. */
  5021. kthread_bind(cpu_rq(cpu)->migration_thread,
  5022. cpumask_any(cpu_online_mask));
  5023. kthread_stop(cpu_rq(cpu)->migration_thread);
  5024. put_task_struct(cpu_rq(cpu)->migration_thread);
  5025. cpu_rq(cpu)->migration_thread = NULL;
  5026. break;
  5027. case CPU_DEAD:
  5028. case CPU_DEAD_FROZEN:
  5029. cpuset_lock(); /* around calls to cpuset_cpus_allowed_lock() */
  5030. migrate_live_tasks(cpu);
  5031. rq = cpu_rq(cpu);
  5032. kthread_stop(rq->migration_thread);
  5033. put_task_struct(rq->migration_thread);
  5034. rq->migration_thread = NULL;
  5035. /* Idle task back to normal (off runqueue, low prio) */
  5036. raw_spin_lock_irq(&rq->lock);
  5037. update_rq_clock(rq);
  5038. deactivate_task(rq, rq->idle, 0);
  5039. __setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
  5040. rq->idle->sched_class = &idle_sched_class;
  5041. migrate_dead_tasks(cpu);
  5042. raw_spin_unlock_irq(&rq->lock);
  5043. cpuset_unlock();
  5044. migrate_nr_uninterruptible(rq);
  5045. BUG_ON(rq->nr_running != 0);
  5046. calc_global_load_remove(rq);
  5047. /*
  5048. * No need to migrate the tasks: it was best-effort if
  5049. * they didn't take sched_hotcpu_mutex. Just wake up
  5050. * the requestors.
  5051. */
  5052. raw_spin_lock_irq(&rq->lock);
  5053. while (!list_empty(&rq->migration_queue)) {
  5054. struct migration_req *req;
  5055. req = list_entry(rq->migration_queue.next,
  5056. struct migration_req, list);
  5057. list_del_init(&req->list);
  5058. raw_spin_unlock_irq(&rq->lock);
  5059. complete(&req->done);
  5060. raw_spin_lock_irq(&rq->lock);
  5061. }
  5062. raw_spin_unlock_irq(&rq->lock);
  5063. break;
  5064. case CPU_DYING:
  5065. case CPU_DYING_FROZEN:
  5066. /* Update our root-domain */
  5067. rq = cpu_rq(cpu);
  5068. raw_spin_lock_irqsave(&rq->lock, flags);
  5069. if (rq->rd) {
  5070. BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
  5071. set_rq_offline(rq);
  5072. }
  5073. raw_spin_unlock_irqrestore(&rq->lock, flags);
  5074. break;
  5075. #endif
  5076. }
  5077. return NOTIFY_OK;
  5078. }
  5079. /*
  5080. * Register at high priority so that task migration (migrate_all_tasks)
  5081. * happens before everything else. This has to be lower priority than
  5082. * the notifier in the perf_event subsystem, though.
  5083. */
  5084. static struct notifier_block __cpuinitdata migration_notifier = {
  5085. .notifier_call = migration_call,
  5086. .priority = 10
  5087. };
  5088. static int __init migration_init(void)
  5089. {
  5090. void *cpu = (void *)(long)smp_processor_id();
  5091. int err;
  5092. /* Start one for the boot CPU: */
  5093. err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu);
  5094. BUG_ON(err == NOTIFY_BAD);
  5095. migration_call(&migration_notifier, CPU_ONLINE, cpu);
  5096. register_cpu_notifier(&migration_notifier);
  5097. return 0;
  5098. }
  5099. early_initcall(migration_init);
  5100. #endif
  5101. #ifdef CONFIG_SMP
  5102. #ifdef CONFIG_SCHED_DEBUG
  5103. static __read_mostly int sched_domain_debug_enabled;
  5104. static int __init sched_domain_debug_setup(char *str)
  5105. {
  5106. sched_domain_debug_enabled = 1;
  5107. return 0;
  5108. }
  5109. early_param("sched_debug", sched_domain_debug_setup);
  5110. static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
  5111. struct cpumask *groupmask)
  5112. {
  5113. struct sched_group *group = sd->groups;
  5114. char str[256];
  5115. cpulist_scnprintf(str, sizeof(str), sched_domain_span(sd));
  5116. cpumask_clear(groupmask);
  5117. printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
  5118. if (!(sd->flags & SD_LOAD_BALANCE)) {
  5119. printk("does not load-balance\n");
  5120. if (sd->parent)
  5121. printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
  5122. " has parent");
  5123. return -1;
  5124. }
  5125. printk(KERN_CONT "span %s level %s\n", str, sd->name);
  5126. if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
  5127. printk(KERN_ERR "ERROR: domain->span does not contain "
  5128. "CPU%d\n", cpu);
  5129. }
  5130. if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
  5131. printk(KERN_ERR "ERROR: domain->groups does not contain"
  5132. " CPU%d\n", cpu);
  5133. }
  5134. printk(KERN_DEBUG "%*s groups:", level + 1, "");
  5135. do {
  5136. if (!group) {
  5137. printk("\n");
  5138. printk(KERN_ERR "ERROR: group is NULL\n");
  5139. break;
  5140. }
  5141. if (!group->cpu_power) {
  5142. printk(KERN_CONT "\n");
  5143. printk(KERN_ERR "ERROR: domain->cpu_power not "
  5144. "set\n");
  5145. break;
  5146. }
  5147. if (!cpumask_weight(sched_group_cpus(group))) {
  5148. printk(KERN_CONT "\n");
  5149. printk(KERN_ERR "ERROR: empty group\n");
  5150. break;
  5151. }
  5152. if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
  5153. printk(KERN_CONT "\n");
  5154. printk(KERN_ERR "ERROR: repeated CPUs\n");
  5155. break;
  5156. }
  5157. cpumask_or(groupmask, groupmask, sched_group_cpus(group));
  5158. cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group));
  5159. printk(KERN_CONT " %s", str);
  5160. if (group->cpu_power != SCHED_LOAD_SCALE) {
  5161. printk(KERN_CONT " (cpu_power = %d)",
  5162. group->cpu_power);
  5163. }
  5164. group = group->next;
  5165. } while (group != sd->groups);
  5166. printk(KERN_CONT "\n");
  5167. if (!cpumask_equal(sched_domain_span(sd), groupmask))
  5168. printk(KERN_ERR "ERROR: groups don't span domain->span\n");
  5169. if (sd->parent &&
  5170. !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
  5171. printk(KERN_ERR "ERROR: parent span is not a superset "
  5172. "of domain->span\n");
  5173. return 0;
  5174. }
  5175. static void sched_domain_debug(struct sched_domain *sd, int cpu)
  5176. {
  5177. cpumask_var_t groupmask;
  5178. int level = 0;
  5179. if (!sched_domain_debug_enabled)
  5180. return;
  5181. if (!sd) {
  5182. printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu);
  5183. return;
  5184. }
  5185. printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu);
  5186. if (!alloc_cpumask_var(&groupmask, GFP_KERNEL)) {
  5187. printk(KERN_DEBUG "Cannot load-balance (out of memory)\n");
  5188. return;
  5189. }
  5190. for (;;) {
  5191. if (sched_domain_debug_one(sd, cpu, level, groupmask))
  5192. break;
  5193. level++;
  5194. sd = sd->parent;
  5195. if (!sd)
  5196. break;
  5197. }
  5198. free_cpumask_var(groupmask);
  5199. }
  5200. #else /* !CONFIG_SCHED_DEBUG */
  5201. # define sched_domain_debug(sd, cpu) do { } while (0)
  5202. #endif /* CONFIG_SCHED_DEBUG */
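/*
* A domain is 'degenerate' (and may be dropped) when it spans a single
* CPU, or when none of the balancing flags it carries can do useful
* work with only one group.
*/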
  5203. static int sd_degenerate(struct sched_domain *sd)
  5204. {
  5205. if (cpumask_weight(sched_domain_span(sd)) == 1)
  5206. return 1;
  5207. /* Following flags need at least 2 groups */
  5208. if (sd->flags & (SD_LOAD_BALANCE |
  5209. SD_BALANCE_NEWIDLE |
  5210. SD_BALANCE_FORK |
  5211. SD_BALANCE_EXEC |
  5212. SD_SHARE_CPUPOWER |
  5213. SD_SHARE_PKG_RESOURCES)) {
  5214. if (sd->groups != sd->groups->next)
  5215. return 0;
  5216. }
  5217. /* Following flags don't use groups */
  5218. if (sd->flags & (SD_WAKE_AFFINE))
  5219. return 0;
  5220. return 1;
  5221. }
  5222. static int
  5223. sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
  5224. {
  5225. unsigned long cflags = sd->flags, pflags = parent->flags;
  5226. if (sd_degenerate(parent))
  5227. return 1;
  5228. if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
  5229. return 0;
  5230. /* Flags needing groups don't count if only 1 group in parent */
  5231. if (parent->groups == parent->groups->next) {
  5232. pflags &= ~(SD_LOAD_BALANCE |
  5233. SD_BALANCE_NEWIDLE |
  5234. SD_BALANCE_FORK |
  5235. SD_BALANCE_EXEC |
  5236. SD_SHARE_CPUPOWER |
  5237. SD_SHARE_PKG_RESOURCES);
  5238. if (nr_node_ids == 1)
  5239. pflags &= ~SD_SERIALIZE;
  5240. }
  5241. if (~cflags & pflags)
  5242. return 0;
  5243. return 1;
  5244. }
  5245. static void free_rootdomain(struct root_domain *rd)
  5246. {
  5247. synchronize_sched();
  5248. cpupri_cleanup(&rd->cpupri);
  5249. free_cpumask_var(rd->rto_mask);
  5250. free_cpumask_var(rd->online);
  5251. free_cpumask_var(rd->span);
  5252. kfree(rd);
  5253. }
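/*
* Switch @rq to the root domain @rd: take it out of its old root
* domain (freeing that one once its last reference is gone), add it
* to the new one, and bring it online there if its CPU is active.
*/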
  5254. static void rq_attach_root(struct rq *rq, struct root_domain *rd)
  5255. {
  5256. struct root_domain *old_rd = NULL;
  5257. unsigned long flags;
  5258. raw_spin_lock_irqsave(&rq->lock, flags);
  5259. if (rq->rd) {
  5260. old_rd = rq->rd;
  5261. if (cpumask_test_cpu(rq->cpu, old_rd->online))
  5262. set_rq_offline(rq);
  5263. cpumask_clear_cpu(rq->cpu, old_rd->span);
  5264. /*
* If we don't want to free the old_rd yet then
  5266. * set old_rd to NULL to skip the freeing later
  5267. * in this function:
  5268. */
  5269. if (!atomic_dec_and_test(&old_rd->refcount))
  5270. old_rd = NULL;
  5271. }
  5272. atomic_inc(&rd->refcount);
  5273. rq->rd = rd;
  5274. cpumask_set_cpu(rq->cpu, rd->span);
  5275. if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
  5276. set_rq_online(rq);
  5277. raw_spin_unlock_irqrestore(&rq->lock, flags);
  5278. if (old_rd)
  5279. free_rootdomain(old_rd);
  5280. }
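/*
* Allocate the cpumasks and cpupri state embedded in a root domain,
* unwinding any partial allocation on failure.
*/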
  5281. static int init_rootdomain(struct root_domain *rd, bool bootmem)
  5282. {
  5283. gfp_t gfp = GFP_KERNEL;
  5284. memset(rd, 0, sizeof(*rd));
  5285. if (bootmem)
  5286. gfp = GFP_NOWAIT;
  5287. if (!alloc_cpumask_var(&rd->span, gfp))
  5288. goto out;
  5289. if (!alloc_cpumask_var(&rd->online, gfp))
  5290. goto free_span;
  5291. if (!alloc_cpumask_var(&rd->rto_mask, gfp))
  5292. goto free_online;
  5293. if (cpupri_init(&rd->cpupri, bootmem) != 0)
  5294. goto free_rto_mask;
  5295. return 0;
  5296. free_rto_mask:
  5297. free_cpumask_var(rd->rto_mask);
  5298. free_online:
  5299. free_cpumask_var(rd->online);
  5300. free_span:
  5301. free_cpumask_var(rd->span);
  5302. out:
  5303. return -ENOMEM;
  5304. }
  5305. static void init_defrootdomain(void)
  5306. {
  5307. init_rootdomain(&def_root_domain, true);
  5308. atomic_set(&def_root_domain.refcount, 1);
  5309. }
  5310. static struct root_domain *alloc_rootdomain(void)
  5311. {
  5312. struct root_domain *rd;
  5313. rd = kmalloc(sizeof(*rd), GFP_KERNEL);
  5314. if (!rd)
  5315. return NULL;
  5316. if (init_rootdomain(rd, false) != 0) {
  5317. kfree(rd);
  5318. return NULL;
  5319. }
  5320. return rd;
  5321. }
  5322. /*
  5323. * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  5324. * hold the hotplug lock.
  5325. */
  5326. static void
  5327. cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
  5328. {
  5329. struct rq *rq = cpu_rq(cpu);
  5330. struct sched_domain *tmp;
  5331. /* Remove the sched domains which do not contribute to scheduling. */
  5332. for (tmp = sd; tmp; ) {
  5333. struct sched_domain *parent = tmp->parent;
  5334. if (!parent)
  5335. break;
  5336. if (sd_parent_degenerate(tmp, parent)) {
  5337. tmp->parent = parent->parent;
  5338. if (parent->parent)
  5339. parent->parent->child = tmp;
  5340. } else
  5341. tmp = tmp->parent;
  5342. }
  5343. if (sd && sd_degenerate(sd)) {
  5344. sd = sd->parent;
  5345. if (sd)
  5346. sd->child = NULL;
  5347. }
  5348. sched_domain_debug(sd, cpu);
  5349. rq_attach_root(rq, rd);
  5350. rcu_assign_pointer(rq->sd, sd);
  5351. }
  5352. /* cpus with isolated domains */
  5353. static cpumask_var_t cpu_isolated_map;
  5354. /* Setup the mask of cpus configured for isolated domains */
  5355. static int __init isolated_cpu_setup(char *str)
  5356. {
  5357. alloc_bootmem_cpumask_var(&cpu_isolated_map);
  5358. cpulist_parse(str, cpu_isolated_map);
  5359. return 1;
  5360. }
  5361. __setup("isolcpus=", isolated_cpu_setup);
  5362. /*
  5363. * init_sched_build_groups takes the cpumask we wish to span, and a pointer
* to a function which identifies what group (along with its sched_group) a CPU
* belongs to. The return value of group_fn must be >= 0 and < nr_cpu_ids
  5366. * (due to the fact that we keep track of groups covered with a struct cpumask).
  5367. *
  5368. * init_sched_build_groups will build a circular linked list of the groups
  5369. * covered by the given span, and will set each group's ->cpumask correctly,
  5370. * and ->cpu_power to 0.
  5371. */
  5372. static void
  5373. init_sched_build_groups(const struct cpumask *span,
  5374. const struct cpumask *cpu_map,
  5375. int (*group_fn)(int cpu, const struct cpumask *cpu_map,
  5376. struct sched_group **sg,
  5377. struct cpumask *tmpmask),
  5378. struct cpumask *covered, struct cpumask *tmpmask)
  5379. {
  5380. struct sched_group *first = NULL, *last = NULL;
  5381. int i;
  5382. cpumask_clear(covered);
  5383. for_each_cpu(i, span) {
  5384. struct sched_group *sg;
  5385. int group = group_fn(i, cpu_map, &sg, tmpmask);
  5386. int j;
  5387. if (cpumask_test_cpu(i, covered))
  5388. continue;
  5389. cpumask_clear(sched_group_cpus(sg));
  5390. sg->cpu_power = 0;
  5391. for_each_cpu(j, span) {
  5392. if (group_fn(j, cpu_map, NULL, tmpmask) != group)
  5393. continue;
  5394. cpumask_set_cpu(j, covered);
  5395. cpumask_set_cpu(j, sched_group_cpus(sg));
  5396. }
  5397. if (!first)
  5398. first = sg;
  5399. if (last)
  5400. last->next = sg;
  5401. last = sg;
  5402. }
  5403. last->next = first;
  5404. }
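/* Upper bound on the number of nodes a node's sched_domain is built to span. */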
  5405. #define SD_NODES_PER_DOMAIN 16
  5406. #ifdef CONFIG_NUMA
  5407. /**
  5408. * find_next_best_node - find the next node to include in a sched_domain
  5409. * @node: node whose sched_domain we're building
  5410. * @used_nodes: nodes already in the sched_domain
  5411. *
  5412. * Find the next node to include in a given scheduling domain. Simply
  5413. * finds the closest node not already in the @used_nodes map.
  5414. *
  5415. * Should use nodemask_t.
  5416. */
  5417. static int find_next_best_node(int node, nodemask_t *used_nodes)
  5418. {
  5419. int i, n, val, min_val, best_node = 0;
  5420. min_val = INT_MAX;
  5421. for (i = 0; i < nr_node_ids; i++) {
  5422. /* Start at @node */
  5423. n = (node + i) % nr_node_ids;
  5424. if (!nr_cpus_node(n))
  5425. continue;
  5426. /* Skip already used nodes */
  5427. if (node_isset(n, *used_nodes))
  5428. continue;
  5429. /* Simple min distance search */
  5430. val = node_distance(node, n);
  5431. if (val < min_val) {
  5432. min_val = val;
  5433. best_node = n;
  5434. }
  5435. }
  5436. node_set(best_node, *used_nodes);
  5437. return best_node;
  5438. }
  5439. /**
  5440. * sched_domain_node_span - get a cpumask for a node's sched_domain
  5441. * @node: node whose cpumask we're constructing
  5442. * @span: resulting cpumask
  5443. *
  5444. * Given a node, construct a good cpumask for its sched_domain to span. It
  5445. * should be one that prevents unnecessary balancing, but also spreads tasks
  5446. * out optimally.
  5447. */
  5448. static void sched_domain_node_span(int node, struct cpumask *span)
  5449. {
  5450. nodemask_t used_nodes;
  5451. int i;
  5452. cpumask_clear(span);
  5453. nodes_clear(used_nodes);
  5454. cpumask_or(span, span, cpumask_of_node(node));
  5455. node_set(node, used_nodes);
  5456. for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
  5457. int next_node = find_next_best_node(node, &used_nodes);
  5458. cpumask_or(span, span, cpumask_of_node(next_node));
  5459. }
  5460. }
  5461. #endif /* CONFIG_NUMA */
  5462. int sched_smt_power_savings = 0, sched_mc_power_savings = 0;
  5463. /*
  5464. * The cpus mask in sched_group and sched_domain hangs off the end.
  5465. *
5466. * ( See the comments in include/linux/sched.h:struct sched_group
  5467. * and struct sched_domain. )
  5468. */
  5469. struct static_sched_group {
  5470. struct sched_group sg;
  5471. DECLARE_BITMAP(cpus, CONFIG_NR_CPUS);
  5472. };
  5473. struct static_sched_domain {
  5474. struct sched_domain sd;
  5475. DECLARE_BITMAP(span, CONFIG_NR_CPUS);
  5476. };
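/*
 * Editorial note: struct sched_group and struct sched_domain declare their
 * cpumask as a flexible trailing array, so these static wrappers simply
 * reserve CONFIG_NR_CPUS bits of storage immediately after the structure.
 * sched_group_cpus() and sched_domain_span() then resolve to the embedded
 * 'cpus'/'span' bitmaps without any separate allocation.
 */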
  5477. struct s_data {
  5478. #ifdef CONFIG_NUMA
  5479. int sd_allnodes;
  5480. cpumask_var_t domainspan;
  5481. cpumask_var_t covered;
  5482. cpumask_var_t notcovered;
  5483. #endif
  5484. cpumask_var_t nodemask;
  5485. cpumask_var_t this_sibling_map;
  5486. cpumask_var_t this_core_map;
  5487. cpumask_var_t send_covered;
  5488. cpumask_var_t tmpmask;
  5489. struct sched_group **sched_group_nodes;
  5490. struct root_domain *rd;
  5491. };
  5492. enum s_alloc {
  5493. sa_sched_groups = 0,
  5494. sa_rootdomain,
  5495. sa_tmpmask,
  5496. sa_send_covered,
  5497. sa_this_core_map,
  5498. sa_this_sibling_map,
  5499. sa_nodemask,
  5500. sa_sched_group_nodes,
  5501. #ifdef CONFIG_NUMA
  5502. sa_notcovered,
  5503. sa_covered,
  5504. sa_domainspan,
  5505. #endif
  5506. sa_none,
  5507. };
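/*
 * Editorial note: the enum values are ordered so that each one names the
 * last allocation that succeeded. __visit_domain_allocation_hell() (below)
 * returns the stage it reached, and on failure __free_domain_allocs()
 * switches on that value and falls through, releasing everything acquired
 * so far. E.g. if allocating d->tmpmask fails, sa_send_covered is returned
 * and the unwind frees send_covered, this_core_map, this_sibling_map,
 * nodemask and, on NUMA, the sched_group_nodes array and the
 * notcovered/covered/domainspan masks.
 */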
  5508. /*
  5509. * SMT sched-domains:
  5510. */
  5511. #ifdef CONFIG_SCHED_SMT
  5512. static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
  5513. static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
  5514. static int
  5515. cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
  5516. struct sched_group **sg, struct cpumask *unused)
  5517. {
  5518. if (sg)
  5519. *sg = &per_cpu(sched_groups, cpu).sg;
  5520. return cpu;
  5521. }
  5522. #endif /* CONFIG_SCHED_SMT */
  5523. /*
  5524. * multi-core sched-domains:
  5525. */
  5526. #ifdef CONFIG_SCHED_MC
  5527. static DEFINE_PER_CPU(struct static_sched_domain, core_domains);
  5528. static DEFINE_PER_CPU(struct static_sched_group, sched_group_core);
  5529. #endif /* CONFIG_SCHED_MC */
  5530. #if defined(CONFIG_SCHED_MC) && defined(CONFIG_SCHED_SMT)
  5531. static int
  5532. cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
  5533. struct sched_group **sg, struct cpumask *mask)
  5534. {
  5535. int group;
  5536. cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
  5537. group = cpumask_first(mask);
  5538. if (sg)
  5539. *sg = &per_cpu(sched_group_core, group).sg;
  5540. return group;
  5541. }
  5542. #elif defined(CONFIG_SCHED_MC)
  5543. static int
  5544. cpu_to_core_group(int cpu, const struct cpumask *cpu_map,
  5545. struct sched_group **sg, struct cpumask *unused)
  5546. {
  5547. if (sg)
  5548. *sg = &per_cpu(sched_group_core, cpu).sg;
  5549. return cpu;
  5550. }
  5551. #endif
  5552. static DEFINE_PER_CPU(struct static_sched_domain, phys_domains);
  5553. static DEFINE_PER_CPU(struct static_sched_group, sched_group_phys);
  5554. static int
  5555. cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
  5556. struct sched_group **sg, struct cpumask *mask)
  5557. {
  5558. int group;
  5559. #ifdef CONFIG_SCHED_MC
  5560. cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
  5561. group = cpumask_first(mask);
  5562. #elif defined(CONFIG_SCHED_SMT)
  5563. cpumask_and(mask, topology_thread_cpumask(cpu), cpu_map);
  5564. group = cpumask_first(mask);
  5565. #else
  5566. group = cpu;
  5567. #endif
  5568. if (sg)
  5569. *sg = &per_cpu(sched_group_phys, group).sg;
  5570. return group;
  5571. }
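/*
 * Illustrative example (editorial, hypothetical topology): with
 * CONFIG_SCHED_MC on a 2-socket box where cpu_coregroup_mask() covers
 * CPUs 0-3 on socket 0 and CPUs 4-7 on socket 1, and all of them present
 * in cpu_map, cpu_to_phys_group() returns 0 for CPUs 0-3 and 4 for CPUs
 * 4-7: a physical group is identified by the first cpu_map CPU it
 * contains, and *sg points at that CPU's sched_group_phys entry.
 */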
  5572. #ifdef CONFIG_NUMA
  5573. /*
  5574. * The init_sched_build_groups can't handle what we want to do with node
  5575. * groups, so roll our own. Now each node has its own list of groups which
  5576. * gets dynamically allocated.
  5577. */
  5578. static DEFINE_PER_CPU(struct static_sched_domain, node_domains);
  5579. static struct sched_group ***sched_group_nodes_bycpu;
  5580. static DEFINE_PER_CPU(struct static_sched_domain, allnodes_domains);
  5581. static DEFINE_PER_CPU(struct static_sched_group, sched_group_allnodes);
  5582. static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
  5583. struct sched_group **sg,
  5584. struct cpumask *nodemask)
  5585. {
  5586. int group;
  5587. cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
  5588. group = cpumask_first(nodemask);
  5589. if (sg)
  5590. *sg = &per_cpu(sched_group_allnodes, group).sg;
  5591. return group;
  5592. }
  5593. static void init_numa_sched_groups_power(struct sched_group *group_head)
  5594. {
  5595. struct sched_group *sg = group_head;
  5596. int j;
  5597. if (!sg)
  5598. return;
  5599. do {
  5600. for_each_cpu(j, sched_group_cpus(sg)) {
  5601. struct sched_domain *sd;
  5602. sd = &per_cpu(phys_domains, j).sd;
  5603. if (j != group_first_cpu(sd->groups)) {
  5604. /*
  5605. * Only add "power" once for each
  5606. * physical package.
  5607. */
  5608. continue;
  5609. }
  5610. sg->cpu_power += sd->groups->cpu_power;
  5611. }
  5612. sg = sg->next;
  5613. } while (sg != group_head);
  5614. }
  5615. static int build_numa_sched_groups(struct s_data *d,
  5616. const struct cpumask *cpu_map, int num)
  5617. {
  5618. struct sched_domain *sd;
  5619. struct sched_group *sg, *prev;
  5620. int n, j;
  5621. cpumask_clear(d->covered);
  5622. cpumask_and(d->nodemask, cpumask_of_node(num), cpu_map);
  5623. if (cpumask_empty(d->nodemask)) {
  5624. d->sched_group_nodes[num] = NULL;
  5625. goto out;
  5626. }
  5627. sched_domain_node_span(num, d->domainspan);
  5628. cpumask_and(d->domainspan, d->domainspan, cpu_map);
  5629. sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
  5630. GFP_KERNEL, num);
  5631. if (!sg) {
  5632. printk(KERN_WARNING "Can not alloc domain group for node %d\n",
  5633. num);
  5634. return -ENOMEM;
  5635. }
  5636. d->sched_group_nodes[num] = sg;
  5637. for_each_cpu(j, d->nodemask) {
  5638. sd = &per_cpu(node_domains, j).sd;
  5639. sd->groups = sg;
  5640. }
  5641. sg->cpu_power = 0;
  5642. cpumask_copy(sched_group_cpus(sg), d->nodemask);
  5643. sg->next = sg;
  5644. cpumask_or(d->covered, d->covered, d->nodemask);
  5645. prev = sg;
  5646. for (j = 0; j < nr_node_ids; j++) {
  5647. n = (num + j) % nr_node_ids;
  5648. cpumask_complement(d->notcovered, d->covered);
  5649. cpumask_and(d->tmpmask, d->notcovered, cpu_map);
  5650. cpumask_and(d->tmpmask, d->tmpmask, d->domainspan);
  5651. if (cpumask_empty(d->tmpmask))
  5652. break;
  5653. cpumask_and(d->tmpmask, d->tmpmask, cpumask_of_node(n));
  5654. if (cpumask_empty(d->tmpmask))
  5655. continue;
  5656. sg = kmalloc_node(sizeof(struct sched_group) + cpumask_size(),
  5657. GFP_KERNEL, num);
  5658. if (!sg) {
  5659. printk(KERN_WARNING
  5660. "Can not alloc domain group for node %d\n", j);
  5661. return -ENOMEM;
  5662. }
  5663. sg->cpu_power = 0;
  5664. cpumask_copy(sched_group_cpus(sg), d->tmpmask);
  5665. sg->next = prev->next;
  5666. cpumask_or(d->covered, d->covered, d->tmpmask);
  5667. prev->next = sg;
  5668. prev = sg;
  5669. }
  5670. out:
  5671. return 0;
  5672. }
  5673. #endif /* CONFIG_NUMA */
  5674. #ifdef CONFIG_NUMA
  5675. /* Free memory allocated for various sched_group structures */
  5676. static void free_sched_groups(const struct cpumask *cpu_map,
  5677. struct cpumask *nodemask)
  5678. {
  5679. int cpu, i;
  5680. for_each_cpu(cpu, cpu_map) {
  5681. struct sched_group **sched_group_nodes
  5682. = sched_group_nodes_bycpu[cpu];
  5683. if (!sched_group_nodes)
  5684. continue;
  5685. for (i = 0; i < nr_node_ids; i++) {
  5686. struct sched_group *oldsg, *sg = sched_group_nodes[i];
  5687. cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
  5688. if (cpumask_empty(nodemask))
  5689. continue;
  5690. if (sg == NULL)
  5691. continue;
  5692. sg = sg->next;
  5693. next_sg:
  5694. oldsg = sg;
  5695. sg = sg->next;
  5696. kfree(oldsg);
  5697. if (oldsg != sched_group_nodes[i])
  5698. goto next_sg;
  5699. }
  5700. kfree(sched_group_nodes);
  5701. sched_group_nodes_bycpu[cpu] = NULL;
  5702. }
  5703. }
  5704. #else /* !CONFIG_NUMA */
  5705. static void free_sched_groups(const struct cpumask *cpu_map,
  5706. struct cpumask *nodemask)
  5707. {
  5708. }
  5709. #endif /* CONFIG_NUMA */
  5710. /*
  5711. * Initialize sched groups cpu_power.
  5712. *
5713. * cpu_power indicates the capacity of a sched group, which is used while
5714. * distributing the load between the different sched groups in a sched domain.
5715. * Typically the cpu_power of all the groups in a sched domain will be the same
5716. * unless there are asymmetries in the topology. If there are asymmetries, a
5717. * group with more cpu_power will pick up more load than a group with
5718. * less cpu_power.
  5719. */
  5720. static void init_sched_groups_power(int cpu, struct sched_domain *sd)
  5721. {
  5722. struct sched_domain *child;
  5723. struct sched_group *group;
  5724. long power;
  5725. int weight;
  5726. WARN_ON(!sd || !sd->groups);
  5727. if (cpu != group_first_cpu(sd->groups))
  5728. return;
  5729. child = sd->child;
  5730. sd->groups->cpu_power = 0;
  5731. if (!child) {
  5732. power = SCHED_LOAD_SCALE;
  5733. weight = cpumask_weight(sched_domain_span(sd));
  5734. /*
  5735. * SMT siblings share the power of a single core.
  5736. * Usually multiple threads get a better yield out of
5737. * that one core than a single thread would have;
5738. * reflect that in sd->smt_gain.
  5739. */
  5740. if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
  5741. power *= sd->smt_gain;
  5742. power /= weight;
  5743. power >>= SCHED_LOAD_SHIFT;
  5744. }
  5745. sd->groups->cpu_power += power;
  5746. return;
  5747. }
  5748. /*
5749. * Add the cpu_power of each child group to this group's cpu_power.
  5750. */
  5751. group = child->groups;
  5752. do {
  5753. sd->groups->cpu_power += group->cpu_power;
  5754. group = group->next;
  5755. } while (group != child->groups);
  5756. }
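/*
 * Worked example (editorial; assumes the usual SCHED_LOAD_SCALE of 1024
 * and a default smt_gain of 1178, i.e. a ~15% SMT bonus): for a
 * lowest-level SMT domain spanning two hardware threads, weight == 2 and
 * SD_SHARE_CPUPOWER is set, so each thread-level group gets
 *
 *	(1024 * 1178 / 2) >> SCHED_LOAD_SHIFT == 589
 *
 * and the parent core-level group, summing its children, advertises
 * 1178 - roughly 1.15 CPUs of capacity for the pair rather than 2.
 */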
  5757. /*
  5758. * Initializers for schedule domains
  5759. * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
  5760. */
  5761. #ifdef CONFIG_SCHED_DEBUG
  5762. # define SD_INIT_NAME(sd, type) sd->name = #type
  5763. #else
  5764. # define SD_INIT_NAME(sd, type) do { } while (0)
  5765. #endif
  5766. #define SD_INIT(sd, type) sd_init_##type(sd)
  5767. #define SD_INIT_FUNC(type) \
  5768. static noinline void sd_init_##type(struct sched_domain *sd) \
  5769. { \
  5770. memset(sd, 0, sizeof(*sd)); \
  5771. *sd = SD_##type##_INIT; \
  5772. sd->level = SD_LV_##type; \
  5773. SD_INIT_NAME(sd, type); \
  5774. }
  5775. SD_INIT_FUNC(CPU)
  5776. #ifdef CONFIG_NUMA
  5777. SD_INIT_FUNC(ALLNODES)
  5778. SD_INIT_FUNC(NODE)
  5779. #endif
  5780. #ifdef CONFIG_SCHED_SMT
  5781. SD_INIT_FUNC(SIBLING)
  5782. #endif
  5783. #ifdef CONFIG_SCHED_MC
  5784. SD_INIT_FUNC(MC)
  5785. #endif
  5786. static int default_relax_domain_level = -1;
  5787. static int __init setup_relax_domain_level(char *str)
  5788. {
  5789. unsigned long val;
  5790. val = simple_strtoul(str, NULL, 0);
  5791. if (val < SD_LV_MAX)
  5792. default_relax_domain_level = val;
  5793. return 1;
  5794. }
  5795. __setup("relax_domain_level=", setup_relax_domain_level);
  5796. static void set_domain_attribute(struct sched_domain *sd,
  5797. struct sched_domain_attr *attr)
  5798. {
  5799. int request;
  5800. if (!attr || attr->relax_domain_level < 0) {
  5801. if (default_relax_domain_level < 0)
  5802. return;
  5803. else
  5804. request = default_relax_domain_level;
  5805. } else
  5806. request = attr->relax_domain_level;
  5807. if (request < sd->level) {
  5808. /* turn off idle balance on this domain */
  5809. sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
  5810. } else {
  5811. /* turn on idle balance on this domain */
  5812. sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE);
  5813. }
  5814. }
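/*
 * Editorial example: domains whose level is <= the requested value keep
 * SD_BALANCE_WAKE/SD_BALANCE_NEWIDLE, while higher levels have them
 * cleared. Assuming the usual SD_LV_* ordering (SIBLING < MC < CPU <
 * NODE < ALLNODES), booting with "relax_domain_level=2" keeps wake and
 * newly-idle balancing at the SMT and MC levels but disables it for the
 * CPU, NODE and ALLNODES levels.
 */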
  5815. static void __free_domain_allocs(struct s_data *d, enum s_alloc what,
  5816. const struct cpumask *cpu_map)
  5817. {
  5818. switch (what) {
  5819. case sa_sched_groups:
  5820. free_sched_groups(cpu_map, d->tmpmask); /* fall through */
  5821. d->sched_group_nodes = NULL;
  5822. case sa_rootdomain:
  5823. free_rootdomain(d->rd); /* fall through */
  5824. case sa_tmpmask:
  5825. free_cpumask_var(d->tmpmask); /* fall through */
  5826. case sa_send_covered:
  5827. free_cpumask_var(d->send_covered); /* fall through */
  5828. case sa_this_core_map:
  5829. free_cpumask_var(d->this_core_map); /* fall through */
  5830. case sa_this_sibling_map:
  5831. free_cpumask_var(d->this_sibling_map); /* fall through */
  5832. case sa_nodemask:
  5833. free_cpumask_var(d->nodemask); /* fall through */
  5834. case sa_sched_group_nodes:
  5835. #ifdef CONFIG_NUMA
  5836. kfree(d->sched_group_nodes); /* fall through */
  5837. case sa_notcovered:
  5838. free_cpumask_var(d->notcovered); /* fall through */
  5839. case sa_covered:
  5840. free_cpumask_var(d->covered); /* fall through */
  5841. case sa_domainspan:
  5842. free_cpumask_var(d->domainspan); /* fall through */
  5843. #endif
  5844. case sa_none:
  5845. break;
  5846. }
  5847. }
  5848. static enum s_alloc __visit_domain_allocation_hell(struct s_data *d,
  5849. const struct cpumask *cpu_map)
  5850. {
  5851. #ifdef CONFIG_NUMA
  5852. if (!alloc_cpumask_var(&d->domainspan, GFP_KERNEL))
  5853. return sa_none;
  5854. if (!alloc_cpumask_var(&d->covered, GFP_KERNEL))
  5855. return sa_domainspan;
  5856. if (!alloc_cpumask_var(&d->notcovered, GFP_KERNEL))
  5857. return sa_covered;
  5858. /* Allocate the per-node list of sched groups */
  5859. d->sched_group_nodes = kcalloc(nr_node_ids,
  5860. sizeof(struct sched_group *), GFP_KERNEL);
  5861. if (!d->sched_group_nodes) {
  5862. printk(KERN_WARNING "Can not alloc sched group node list\n");
  5863. return sa_notcovered;
  5864. }
  5865. sched_group_nodes_bycpu[cpumask_first(cpu_map)] = d->sched_group_nodes;
  5866. #endif
  5867. if (!alloc_cpumask_var(&d->nodemask, GFP_KERNEL))
  5868. return sa_sched_group_nodes;
  5869. if (!alloc_cpumask_var(&d->this_sibling_map, GFP_KERNEL))
  5870. return sa_nodemask;
  5871. if (!alloc_cpumask_var(&d->this_core_map, GFP_KERNEL))
  5872. return sa_this_sibling_map;
  5873. if (!alloc_cpumask_var(&d->send_covered, GFP_KERNEL))
  5874. return sa_this_core_map;
  5875. if (!alloc_cpumask_var(&d->tmpmask, GFP_KERNEL))
  5876. return sa_send_covered;
  5877. d->rd = alloc_rootdomain();
  5878. if (!d->rd) {
  5879. printk(KERN_WARNING "Cannot alloc root domain\n");
  5880. return sa_tmpmask;
  5881. }
  5882. return sa_rootdomain;
  5883. }
  5884. static struct sched_domain *__build_numa_sched_domains(struct s_data *d,
  5885. const struct cpumask *cpu_map, struct sched_domain_attr *attr, int i)
  5886. {
  5887. struct sched_domain *sd = NULL;
  5888. #ifdef CONFIG_NUMA
  5889. struct sched_domain *parent;
  5890. d->sd_allnodes = 0;
  5891. if (cpumask_weight(cpu_map) >
  5892. SD_NODES_PER_DOMAIN * cpumask_weight(d->nodemask)) {
  5893. sd = &per_cpu(allnodes_domains, i).sd;
  5894. SD_INIT(sd, ALLNODES);
  5895. set_domain_attribute(sd, attr);
  5896. cpumask_copy(sched_domain_span(sd), cpu_map);
  5897. cpu_to_allnodes_group(i, cpu_map, &sd->groups, d->tmpmask);
  5898. d->sd_allnodes = 1;
  5899. }
  5900. parent = sd;
  5901. sd = &per_cpu(node_domains, i).sd;
  5902. SD_INIT(sd, NODE);
  5903. set_domain_attribute(sd, attr);
  5904. sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
  5905. sd->parent = parent;
  5906. if (parent)
  5907. parent->child = sd;
  5908. cpumask_and(sched_domain_span(sd), sched_domain_span(sd), cpu_map);
  5909. #endif
  5910. return sd;
  5911. }
  5912. static struct sched_domain *__build_cpu_sched_domain(struct s_data *d,
  5913. const struct cpumask *cpu_map, struct sched_domain_attr *attr,
  5914. struct sched_domain *parent, int i)
  5915. {
  5916. struct sched_domain *sd;
  5917. sd = &per_cpu(phys_domains, i).sd;
  5918. SD_INIT(sd, CPU);
  5919. set_domain_attribute(sd, attr);
  5920. cpumask_copy(sched_domain_span(sd), d->nodemask);
  5921. sd->parent = parent;
  5922. if (parent)
  5923. parent->child = sd;
  5924. cpu_to_phys_group(i, cpu_map, &sd->groups, d->tmpmask);
  5925. return sd;
  5926. }
  5927. static struct sched_domain *__build_mc_sched_domain(struct s_data *d,
  5928. const struct cpumask *cpu_map, struct sched_domain_attr *attr,
  5929. struct sched_domain *parent, int i)
  5930. {
  5931. struct sched_domain *sd = parent;
  5932. #ifdef CONFIG_SCHED_MC
  5933. sd = &per_cpu(core_domains, i).sd;
  5934. SD_INIT(sd, MC);
  5935. set_domain_attribute(sd, attr);
  5936. cpumask_and(sched_domain_span(sd), cpu_map, cpu_coregroup_mask(i));
  5937. sd->parent = parent;
  5938. parent->child = sd;
  5939. cpu_to_core_group(i, cpu_map, &sd->groups, d->tmpmask);
  5940. #endif
  5941. return sd;
  5942. }
  5943. static struct sched_domain *__build_smt_sched_domain(struct s_data *d,
  5944. const struct cpumask *cpu_map, struct sched_domain_attr *attr,
  5945. struct sched_domain *parent, int i)
  5946. {
  5947. struct sched_domain *sd = parent;
  5948. #ifdef CONFIG_SCHED_SMT
  5949. sd = &per_cpu(cpu_domains, i).sd;
  5950. SD_INIT(sd, SIBLING);
  5951. set_domain_attribute(sd, attr);
  5952. cpumask_and(sched_domain_span(sd), cpu_map, topology_thread_cpumask(i));
  5953. sd->parent = parent;
  5954. parent->child = sd;
  5955. cpu_to_cpu_group(i, cpu_map, &sd->groups, d->tmpmask);
  5956. #endif
  5957. return sd;
  5958. }
  5959. static void build_sched_groups(struct s_data *d, enum sched_domain_level l,
  5960. const struct cpumask *cpu_map, int cpu)
  5961. {
  5962. switch (l) {
  5963. #ifdef CONFIG_SCHED_SMT
  5964. case SD_LV_SIBLING: /* set up CPU (sibling) groups */
  5965. cpumask_and(d->this_sibling_map, cpu_map,
  5966. topology_thread_cpumask(cpu));
  5967. if (cpu == cpumask_first(d->this_sibling_map))
  5968. init_sched_build_groups(d->this_sibling_map, cpu_map,
  5969. &cpu_to_cpu_group,
  5970. d->send_covered, d->tmpmask);
  5971. break;
  5972. #endif
  5973. #ifdef CONFIG_SCHED_MC
  5974. case SD_LV_MC: /* set up multi-core groups */
  5975. cpumask_and(d->this_core_map, cpu_map, cpu_coregroup_mask(cpu));
  5976. if (cpu == cpumask_first(d->this_core_map))
  5977. init_sched_build_groups(d->this_core_map, cpu_map,
  5978. &cpu_to_core_group,
  5979. d->send_covered, d->tmpmask);
  5980. break;
  5981. #endif
  5982. case SD_LV_CPU: /* set up physical groups */
  5983. cpumask_and(d->nodemask, cpumask_of_node(cpu), cpu_map);
  5984. if (!cpumask_empty(d->nodemask))
  5985. init_sched_build_groups(d->nodemask, cpu_map,
  5986. &cpu_to_phys_group,
  5987. d->send_covered, d->tmpmask);
  5988. break;
  5989. #ifdef CONFIG_NUMA
  5990. case SD_LV_ALLNODES:
  5991. init_sched_build_groups(cpu_map, cpu_map, &cpu_to_allnodes_group,
  5992. d->send_covered, d->tmpmask);
  5993. break;
  5994. #endif
  5995. default:
  5996. break;
  5997. }
  5998. }
  5999. /*
  6000. * Build sched domains for a given set of cpus and attach the sched domains
  6001. * to the individual cpus
  6002. */
  6003. static int __build_sched_domains(const struct cpumask *cpu_map,
  6004. struct sched_domain_attr *attr)
  6005. {
  6006. enum s_alloc alloc_state = sa_none;
  6007. struct s_data d;
  6008. struct sched_domain *sd;
  6009. int i;
  6010. #ifdef CONFIG_NUMA
  6011. d.sd_allnodes = 0;
  6012. #endif
  6013. alloc_state = __visit_domain_allocation_hell(&d, cpu_map);
  6014. if (alloc_state != sa_rootdomain)
  6015. goto error;
  6016. alloc_state = sa_sched_groups;
  6017. /*
  6018. * Set up domains for cpus specified by the cpu_map.
  6019. */
  6020. for_each_cpu(i, cpu_map) {
  6021. cpumask_and(d.nodemask, cpumask_of_node(cpu_to_node(i)),
  6022. cpu_map);
  6023. sd = __build_numa_sched_domains(&d, cpu_map, attr, i);
  6024. sd = __build_cpu_sched_domain(&d, cpu_map, attr, sd, i);
  6025. sd = __build_mc_sched_domain(&d, cpu_map, attr, sd, i);
  6026. sd = __build_smt_sched_domain(&d, cpu_map, attr, sd, i);
  6027. }
  6028. for_each_cpu(i, cpu_map) {
  6029. build_sched_groups(&d, SD_LV_SIBLING, cpu_map, i);
  6030. build_sched_groups(&d, SD_LV_MC, cpu_map, i);
  6031. }
  6032. /* Set up physical groups */
  6033. for (i = 0; i < nr_node_ids; i++)
  6034. build_sched_groups(&d, SD_LV_CPU, cpu_map, i);
  6035. #ifdef CONFIG_NUMA
  6036. /* Set up node groups */
  6037. if (d.sd_allnodes)
  6038. build_sched_groups(&d, SD_LV_ALLNODES, cpu_map, 0);
  6039. for (i = 0; i < nr_node_ids; i++)
  6040. if (build_numa_sched_groups(&d, cpu_map, i))
  6041. goto error;
  6042. #endif
  6043. /* Calculate CPU power for physical packages and nodes */
  6044. #ifdef CONFIG_SCHED_SMT
  6045. for_each_cpu(i, cpu_map) {
  6046. sd = &per_cpu(cpu_domains, i).sd;
  6047. init_sched_groups_power(i, sd);
  6048. }
  6049. #endif
  6050. #ifdef CONFIG_SCHED_MC
  6051. for_each_cpu(i, cpu_map) {
  6052. sd = &per_cpu(core_domains, i).sd;
  6053. init_sched_groups_power(i, sd);
  6054. }
  6055. #endif
  6056. for_each_cpu(i, cpu_map) {
  6057. sd = &per_cpu(phys_domains, i).sd;
  6058. init_sched_groups_power(i, sd);
  6059. }
  6060. #ifdef CONFIG_NUMA
  6061. for (i = 0; i < nr_node_ids; i++)
  6062. init_numa_sched_groups_power(d.sched_group_nodes[i]);
  6063. if (d.sd_allnodes) {
  6064. struct sched_group *sg;
  6065. cpu_to_allnodes_group(cpumask_first(cpu_map), cpu_map, &sg,
  6066. d.tmpmask);
  6067. init_numa_sched_groups_power(sg);
  6068. }
  6069. #endif
  6070. /* Attach the domains */
  6071. for_each_cpu(i, cpu_map) {
  6072. #ifdef CONFIG_SCHED_SMT
  6073. sd = &per_cpu(cpu_domains, i).sd;
  6074. #elif defined(CONFIG_SCHED_MC)
  6075. sd = &per_cpu(core_domains, i).sd;
  6076. #else
  6077. sd = &per_cpu(phys_domains, i).sd;
  6078. #endif
  6079. cpu_attach_domain(sd, d.rd, i);
  6080. }
6081. d.sched_group_nodes = NULL; /* don't free this, we still need it */
  6082. __free_domain_allocs(&d, sa_tmpmask, cpu_map);
  6083. return 0;
  6084. error:
  6085. __free_domain_allocs(&d, alloc_state, cpu_map);
  6086. return -ENOMEM;
  6087. }
  6088. static int build_sched_domains(const struct cpumask *cpu_map)
  6089. {
  6090. return __build_sched_domains(cpu_map, NULL);
  6091. }
  6092. static cpumask_var_t *doms_cur; /* current sched domains */
  6093. static int ndoms_cur; /* number of sched domains in 'doms_cur' */
  6094. static struct sched_domain_attr *dattr_cur;
6095. /* attributes of custom domains in 'doms_cur' */
  6096. /*
  6097. * Special case: If a kmalloc of a doms_cur partition (array of
  6098. * cpumask) fails, then fallback to a single sched domain,
  6099. * as determined by the single cpumask fallback_doms.
  6100. */
  6101. static cpumask_var_t fallback_doms;
  6102. /*
  6103. * arch_update_cpu_topology lets virtualized architectures update the
  6104. * cpu core maps. It is supposed to return 1 if the topology changed
  6105. * or 0 if it stayed the same.
  6106. */
  6107. int __attribute__((weak)) arch_update_cpu_topology(void)
  6108. {
  6109. return 0;
  6110. }
  6111. cpumask_var_t *alloc_sched_domains(unsigned int ndoms)
  6112. {
  6113. int i;
  6114. cpumask_var_t *doms;
  6115. doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL);
  6116. if (!doms)
  6117. return NULL;
  6118. for (i = 0; i < ndoms; i++) {
  6119. if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) {
  6120. free_sched_domains(doms, i);
  6121. return NULL;
  6122. }
  6123. }
  6124. return doms;
  6125. }
  6126. void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms)
  6127. {
  6128. unsigned int i;
  6129. for (i = 0; i < ndoms; i++)
  6130. free_cpumask_var(doms[i]);
  6131. kfree(doms);
  6132. }
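/*
 * Usage sketch (editorial; the masks below are placeholders): a caller
 * such as the cpuset code allocates the array, fills in one
 * non-overlapping cpumask per partition and hands it to
 * partition_sched_domains(), which takes ownership and frees it later
 * with free_sched_domains():
 *
 *	cpumask_var_t *doms = alloc_sched_domains(2);
 *	if (doms) {
 *		cpumask_copy(doms[0], mask_of_partition_a);
 *		cpumask_copy(doms[1], mask_of_partition_b);
 *	}
 *	partition_sched_domains(doms ? 2 : 1, doms, NULL);
 *
 * Passing doms == NULL with ndoms == 1 makes partition_sched_domains()
 * fall back to the single 'fallback_doms' partition (see its comment
 * below).
 */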
  6133. /*
  6134. * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  6135. * For now this just excludes isolated cpus, but could be used to
  6136. * exclude other special cases in the future.
  6137. */
  6138. static int arch_init_sched_domains(const struct cpumask *cpu_map)
  6139. {
  6140. int err;
  6141. arch_update_cpu_topology();
  6142. ndoms_cur = 1;
  6143. doms_cur = alloc_sched_domains(ndoms_cur);
  6144. if (!doms_cur)
  6145. doms_cur = &fallback_doms;
  6146. cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
  6147. dattr_cur = NULL;
  6148. err = build_sched_domains(doms_cur[0]);
  6149. register_sched_domain_sysctl();
  6150. return err;
  6151. }
  6152. static void arch_destroy_sched_domains(const struct cpumask *cpu_map,
  6153. struct cpumask *tmpmask)
  6154. {
  6155. free_sched_groups(cpu_map, tmpmask);
  6156. }
  6157. /*
6158. * Detach sched domains from a group of cpus specified in cpu_map.
6159. * These cpus will now be attached to the NULL domain.
  6160. */
  6161. static void detach_destroy_domains(const struct cpumask *cpu_map)
  6162. {
6163. /* Safe to use static storage because the hotplug lock is held. */
  6164. static DECLARE_BITMAP(tmpmask, CONFIG_NR_CPUS);
  6165. int i;
  6166. for_each_cpu(i, cpu_map)
  6167. cpu_attach_domain(NULL, &def_root_domain, i);
  6168. synchronize_sched();
  6169. arch_destroy_sched_domains(cpu_map, to_cpumask(tmpmask));
  6170. }
  6171. /* handle null as "default" */
  6172. static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  6173. struct sched_domain_attr *new, int idx_new)
  6174. {
  6175. struct sched_domain_attr tmp;
  6176. /* fast path */
  6177. if (!new && !cur)
  6178. return 1;
  6179. tmp = SD_ATTR_INIT;
  6180. return !memcmp(cur ? (cur + idx_cur) : &tmp,
  6181. new ? (new + idx_new) : &tmp,
  6182. sizeof(struct sched_domain_attr));
  6183. }
  6184. /*
  6185. * Partition sched domains as specified by the 'ndoms_new'
  6186. * cpumasks in the array doms_new[] of cpumasks. This compares
  6187. * doms_new[] to the current sched domain partitioning, doms_cur[].
  6188. * It destroys each deleted domain and builds each new domain.
  6189. *
6190. * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'.
6191. * The masks must not intersect (i.e. must not overlap); one sched domain
6192. * is set up for each mask. CPUs not in any of the cpumasks will
6193. * not be load balanced. If the same cpumask appears both in the
6194. * current 'doms_cur' domains and in the new 'doms_new', we can leave
6195. * it as it is.
6196. *
6197. * The passed in 'doms_new' should be allocated using
6198. * alloc_sched_domains. This routine takes ownership of it and will
6199. * free it with free_sched_domains when done. If the caller failed the
6200. * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1,
6201. * and partition_sched_domains() will fall back to the single partition
6202. * 'fallback_doms'; this also forces the domains to be rebuilt.
  6203. *
6204. * If doms_new == NULL it will be replaced with the fallback domain,
6204. * spanning cpu_active_mask with the isolated cpus removed.
  6205. * ndoms_new == 0 is a special case for destroying existing domains,
  6206. * and it will not create the default domain.
  6207. *
  6208. * Call with hotplug lock held
  6209. */
  6210. void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],
  6211. struct sched_domain_attr *dattr_new)
  6212. {
  6213. int i, j, n;
  6214. int new_topology;
  6215. mutex_lock(&sched_domains_mutex);
  6216. /* always unregister in case we don't destroy any domains */
  6217. unregister_sched_domain_sysctl();
  6218. /* Let architecture update cpu core mappings. */
  6219. new_topology = arch_update_cpu_topology();
  6220. n = doms_new ? ndoms_new : 0;
  6221. /* Destroy deleted domains */
  6222. for (i = 0; i < ndoms_cur; i++) {
  6223. for (j = 0; j < n && !new_topology; j++) {
  6224. if (cpumask_equal(doms_cur[i], doms_new[j])
  6225. && dattrs_equal(dattr_cur, i, dattr_new, j))
  6226. goto match1;
  6227. }
  6228. /* no match - a current sched domain not in new doms_new[] */
  6229. detach_destroy_domains(doms_cur[i]);
  6230. match1:
  6231. ;
  6232. }
  6233. if (doms_new == NULL) {
  6234. ndoms_cur = 0;
  6235. doms_new = &fallback_doms;
  6236. cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map);
  6237. WARN_ON_ONCE(dattr_new);
  6238. }
  6239. /* Build new domains */
  6240. for (i = 0; i < ndoms_new; i++) {
  6241. for (j = 0; j < ndoms_cur && !new_topology; j++) {
  6242. if (cpumask_equal(doms_new[i], doms_cur[j])
  6243. && dattrs_equal(dattr_new, i, dattr_cur, j))
  6244. goto match2;
  6245. }
  6246. /* no match - add a new doms_new */
  6247. __build_sched_domains(doms_new[i],
  6248. dattr_new ? dattr_new + i : NULL);
  6249. match2:
  6250. ;
  6251. }
  6252. /* Remember the new sched domains */
  6253. if (doms_cur != &fallback_doms)
  6254. free_sched_domains(doms_cur, ndoms_cur);
  6255. kfree(dattr_cur); /* kfree(NULL) is safe */
  6256. doms_cur = doms_new;
  6257. dattr_cur = dattr_new;
  6258. ndoms_cur = ndoms_new;
  6259. register_sched_domain_sysctl();
  6260. mutex_unlock(&sched_domains_mutex);
  6261. }
  6262. #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
  6263. static void arch_reinit_sched_domains(void)
  6264. {
  6265. get_online_cpus();
  6266. /* Destroy domains first to force the rebuild */
  6267. partition_sched_domains(0, NULL, NULL);
  6268. rebuild_sched_domains();
  6269. put_online_cpus();
  6270. }
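/*
 * Editorial note: partition_sched_domains(0, NULL, NULL) is the documented
 * "destroy only" case (ndoms_new == 0 does not create the default domain),
 * so the subsequent rebuild_sched_domains() call re-creates all domains
 * from scratch and the new power-savings setting takes effect in the
 * freshly built topology.
 */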
  6271. static ssize_t sched_power_savings_store(const char *buf, size_t count, int smt)
  6272. {
  6273. unsigned int level = 0;
  6274. if (sscanf(buf, "%u", &level) != 1)
  6275. return -EINVAL;
  6276. /*
6277. * level is always positive, so there is no need to check for
6278. * level < POWERSAVINGS_BALANCE_NONE, which is 0.
6279. * What happens on a 0 or 1 byte write - do we need to check
6280. * count as well?
  6281. */
  6282. if (level >= MAX_POWERSAVINGS_BALANCE_LEVELS)
  6283. return -EINVAL;
  6284. if (smt)
  6285. sched_smt_power_savings = level;
  6286. else
  6287. sched_mc_power_savings = level;
  6288. arch_reinit_sched_domains();
  6289. return count;
  6290. }
  6291. #ifdef CONFIG_SCHED_MC
  6292. static ssize_t sched_mc_power_savings_show(struct sysdev_class *class,
  6293. char *page)
  6294. {
  6295. return sprintf(page, "%u\n", sched_mc_power_savings);
  6296. }
  6297. static ssize_t sched_mc_power_savings_store(struct sysdev_class *class,
  6298. const char *buf, size_t count)
  6299. {
  6300. return sched_power_savings_store(buf, count, 0);
  6301. }
  6302. static SYSDEV_CLASS_ATTR(sched_mc_power_savings, 0644,
  6303. sched_mc_power_savings_show,
  6304. sched_mc_power_savings_store);
  6305. #endif
  6306. #ifdef CONFIG_SCHED_SMT
  6307. static ssize_t sched_smt_power_savings_show(struct sysdev_class *dev,
  6308. char *page)
  6309. {
  6310. return sprintf(page, "%u\n", sched_smt_power_savings);
  6311. }
  6312. static ssize_t sched_smt_power_savings_store(struct sysdev_class *dev,
  6313. const char *buf, size_t count)
  6314. {
  6315. return sched_power_savings_store(buf, count, 1);
  6316. }
  6317. static SYSDEV_CLASS_ATTR(sched_smt_power_savings, 0644,
  6318. sched_smt_power_savings_show,
  6319. sched_smt_power_savings_store);
  6320. #endif
  6321. int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
  6322. {
  6323. int err = 0;
  6324. #ifdef CONFIG_SCHED_SMT
  6325. if (smt_capable())
  6326. err = sysfs_create_file(&cls->kset.kobj,
  6327. &attr_sched_smt_power_savings.attr);
  6328. #endif
  6329. #ifdef CONFIG_SCHED_MC
  6330. if (!err && mc_capable())
  6331. err = sysfs_create_file(&cls->kset.kobj,
  6332. &attr_sched_mc_power_savings.attr);
  6333. #endif
  6334. return err;
  6335. }
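/*
 * Editorial note (assuming these attributes are registered against the
 * cpu sysdev class, as arch code typically does): the knobs then appear as
 * /sys/devices/system/cpu/sched_mc_power_savings and
 * /sys/devices/system/cpu/sched_smt_power_savings; writing a level such
 * as "1" updates the corresponding variable and re-runs
 * arch_reinit_sched_domains() so the policy is reflected in newly built
 * sched domains.
 */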
  6336. #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
  6337. #ifndef CONFIG_CPUSETS
  6338. /*
  6339. * Add online and remove offline CPUs from the scheduler domains.
  6340. * When cpusets are enabled they take over this function.
  6341. */
  6342. static int update_sched_domains(struct notifier_block *nfb,
  6343. unsigned long action, void *hcpu)
  6344. {
  6345. switch (action) {
  6346. case CPU_ONLINE:
  6347. case CPU_ONLINE_FROZEN:
  6348. case CPU_DOWN_PREPARE:
  6349. case CPU_DOWN_PREPARE_FROZEN:
  6350. case CPU_DOWN_FAILED:
  6351. case CPU_DOWN_FAILED_FROZEN:
  6352. partition_sched_domains(1, NULL, NULL);
  6353. return NOTIFY_OK;
  6354. default:
  6355. return NOTIFY_DONE;
  6356. }
  6357. }
  6358. #endif
  6359. static int update_runtime(struct notifier_block *nfb,
  6360. unsigned long action, void *hcpu)
  6361. {
  6362. int cpu = (int)(long)hcpu;
  6363. switch (action) {
  6364. case CPU_DOWN_PREPARE:
  6365. case CPU_DOWN_PREPARE_FROZEN:
  6366. disable_runtime(cpu_rq(cpu));
  6367. return NOTIFY_OK;
  6368. case CPU_DOWN_FAILED:
  6369. case CPU_DOWN_FAILED_FROZEN:
  6370. case CPU_ONLINE:
  6371. case CPU_ONLINE_FROZEN:
  6372. enable_runtime(cpu_rq(cpu));
  6373. return NOTIFY_OK;
  6374. default:
  6375. return NOTIFY_DONE;
  6376. }
  6377. }
  6378. void __init sched_init_smp(void)
  6379. {
  6380. cpumask_var_t non_isolated_cpus;
  6381. alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL);
  6382. alloc_cpumask_var(&fallback_doms, GFP_KERNEL);
  6383. #if defined(CONFIG_NUMA)
  6384. sched_group_nodes_bycpu = kzalloc(nr_cpu_ids * sizeof(void **),
  6385. GFP_KERNEL);
  6386. BUG_ON(sched_group_nodes_bycpu == NULL);
  6387. #endif
  6388. get_online_cpus();
  6389. mutex_lock(&sched_domains_mutex);
  6390. arch_init_sched_domains(cpu_active_mask);
  6391. cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map);
  6392. if (cpumask_empty(non_isolated_cpus))
  6393. cpumask_set_cpu(smp_processor_id(), non_isolated_cpus);
  6394. mutex_unlock(&sched_domains_mutex);
  6395. put_online_cpus();
  6396. #ifndef CONFIG_CPUSETS
  6397. /* XXX: Theoretical race here - CPU may be hotplugged now */
  6398. hotcpu_notifier(update_sched_domains, 0);
  6399. #endif
  6400. /* RT runtime code needs to handle some hotplug events */
  6401. hotcpu_notifier(update_runtime, 0);
  6402. init_hrtick();
  6403. /* Move init over to a non-isolated CPU */
  6404. if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0)
  6405. BUG();
  6406. sched_init_granularity();
  6407. free_cpumask_var(non_isolated_cpus);
  6408. init_sched_rt_class();
  6409. }
  6410. #else
  6411. void __init sched_init_smp(void)
  6412. {
  6413. sched_init_granularity();
  6414. }
  6415. #endif /* CONFIG_SMP */
  6416. const_debug unsigned int sysctl_timer_migration = 1;
  6417. int in_sched_functions(unsigned long addr)
  6418. {
  6419. return in_lock_functions(addr) ||
  6420. (addr >= (unsigned long)__sched_text_start
  6421. && addr < (unsigned long)__sched_text_end);
  6422. }
  6423. static void init_cfs_rq(struct cfs_rq *cfs_rq, struct rq *rq)
  6424. {
  6425. cfs_rq->tasks_timeline = RB_ROOT;
  6426. INIT_LIST_HEAD(&cfs_rq->tasks);
  6427. #ifdef CONFIG_FAIR_GROUP_SCHED
  6428. cfs_rq->rq = rq;
  6429. #endif
  6430. cfs_rq->min_vruntime = (u64)(-(1LL << 20));
  6431. }
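/*
 * Editorial note: min_vruntime starts at (u64)(-(1LL << 20)), i.e. about
 * one million below the 64-bit wrap point. The usual rationale is that
 * this makes vruntime wrap around shortly after boot, so any comparison
 * that mishandles the wrap shows up quickly instead of only after a very
 * long uptime.
 */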
  6432. static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
  6433. {
  6434. struct rt_prio_array *array;
  6435. int i;
  6436. array = &rt_rq->active;
  6437. for (i = 0; i < MAX_RT_PRIO; i++) {
  6438. INIT_LIST_HEAD(array->queue + i);
  6439. __clear_bit(i, array->bitmap);
  6440. }
  6441. /* delimiter for bitsearch: */
  6442. __set_bit(MAX_RT_PRIO, array->bitmap);
  6443. #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
  6444. rt_rq->highest_prio.curr = MAX_RT_PRIO;
  6445. #ifdef CONFIG_SMP
  6446. rt_rq->highest_prio.next = MAX_RT_PRIO;
  6447. #endif
  6448. #endif
  6449. #ifdef CONFIG_SMP
  6450. rt_rq->rt_nr_migratory = 0;
  6451. rt_rq->overloaded = 0;
  6452. plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
  6453. #endif
  6454. rt_rq->rt_time = 0;
  6455. rt_rq->rt_throttled = 0;
  6456. rt_rq->rt_runtime = 0;
  6457. raw_spin_lock_init(&rt_rq->rt_runtime_lock);
  6458. #ifdef CONFIG_RT_GROUP_SCHED
  6459. rt_rq->rt_nr_boosted = 0;
  6460. rt_rq->rq = rq;
  6461. #endif
  6462. }
  6463. #ifdef CONFIG_FAIR_GROUP_SCHED
  6464. static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
  6465. struct sched_entity *se, int cpu, int add,
  6466. struct sched_entity *parent)
  6467. {
  6468. struct rq *rq = cpu_rq(cpu);
  6469. tg->cfs_rq[cpu] = cfs_rq;
  6470. init_cfs_rq(cfs_rq, rq);
  6471. cfs_rq->tg = tg;
  6472. if (add)
  6473. list_add(&cfs_rq->leaf_cfs_rq_list, &rq->leaf_cfs_rq_list);
  6474. tg->se[cpu] = se;
  6475. /* se could be NULL for init_task_group */
  6476. if (!se)
  6477. return;
  6478. if (!parent)
  6479. se->cfs_rq = &rq->cfs;
  6480. else
  6481. se->cfs_rq = parent->my_q;
  6482. se->my_q = cfs_rq;
  6483. se->load.weight = tg->shares;
  6484. se->load.inv_weight = 0;
  6485. se->parent = parent;
  6486. }
  6487. #endif
  6488. #ifdef CONFIG_RT_GROUP_SCHED
  6489. static void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
  6490. struct sched_rt_entity *rt_se, int cpu, int add,
  6491. struct sched_rt_entity *parent)
  6492. {
  6493. struct rq *rq = cpu_rq(cpu);
  6494. tg->rt_rq[cpu] = rt_rq;
  6495. init_rt_rq(rt_rq, rq);
  6496. rt_rq->tg = tg;
  6497. rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
  6498. if (add)
  6499. list_add(&rt_rq->leaf_rt_rq_list, &rq->leaf_rt_rq_list);
  6500. tg->rt_se[cpu] = rt_se;
  6501. if (!rt_se)
  6502. return;
  6503. if (!parent)
  6504. rt_se->rt_rq = &rq->rt;
  6505. else
  6506. rt_se->rt_rq = parent->my_q;
  6507. rt_se->my_q = rt_rq;
  6508. rt_se->parent = parent;
  6509. INIT_LIST_HEAD(&rt_se->run_list);
  6510. }
  6511. #endif
  6512. void __init sched_init(void)
  6513. {
  6514. int i, j;
  6515. unsigned long alloc_size = 0, ptr;
  6516. #ifdef CONFIG_FAIR_GROUP_SCHED
  6517. alloc_size += 2 * nr_cpu_ids * sizeof(void **);
  6518. #endif
  6519. #ifdef CONFIG_RT_GROUP_SCHED
  6520. alloc_size += 2 * nr_cpu_ids * sizeof(void **);
  6521. #endif
  6522. #ifdef CONFIG_CPUMASK_OFFSTACK
  6523. alloc_size += num_possible_cpus() * cpumask_size();
  6524. #endif
  6525. if (alloc_size) {
  6526. ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
  6527. #ifdef CONFIG_FAIR_GROUP_SCHED
  6528. init_task_group.se = (struct sched_entity **)ptr;
  6529. ptr += nr_cpu_ids * sizeof(void **);
  6530. init_task_group.cfs_rq = (struct cfs_rq **)ptr;
  6531. ptr += nr_cpu_ids * sizeof(void **);
  6532. #endif /* CONFIG_FAIR_GROUP_SCHED */
  6533. #ifdef CONFIG_RT_GROUP_SCHED
  6534. init_task_group.rt_se = (struct sched_rt_entity **)ptr;
  6535. ptr += nr_cpu_ids * sizeof(void **);
  6536. init_task_group.rt_rq = (struct rt_rq **)ptr;
  6537. ptr += nr_cpu_ids * sizeof(void **);
  6538. #endif /* CONFIG_RT_GROUP_SCHED */
  6539. #ifdef CONFIG_CPUMASK_OFFSTACK
  6540. for_each_possible_cpu(i) {
  6541. per_cpu(load_balance_tmpmask, i) = (void *)ptr;
  6542. ptr += cpumask_size();
  6543. }
  6544. #endif /* CONFIG_CPUMASK_OFFSTACK */
  6545. }
  6546. #ifdef CONFIG_SMP
  6547. init_defrootdomain();
  6548. #endif
  6549. init_rt_bandwidth(&def_rt_bandwidth,
  6550. global_rt_period(), global_rt_runtime());
  6551. #ifdef CONFIG_RT_GROUP_SCHED
  6552. init_rt_bandwidth(&init_task_group.rt_bandwidth,
  6553. global_rt_period(), global_rt_runtime());
  6554. #endif /* CONFIG_RT_GROUP_SCHED */
  6555. #ifdef CONFIG_CGROUP_SCHED
  6556. list_add(&init_task_group.list, &task_groups);
  6557. INIT_LIST_HEAD(&init_task_group.children);
  6558. #endif /* CONFIG_CGROUP_SCHED */
  6559. #if defined CONFIG_FAIR_GROUP_SCHED && defined CONFIG_SMP
  6560. update_shares_data = __alloc_percpu(nr_cpu_ids * sizeof(unsigned long),
  6561. __alignof__(unsigned long));
  6562. #endif
  6563. for_each_possible_cpu(i) {
  6564. struct rq *rq;
  6565. rq = cpu_rq(i);
  6566. raw_spin_lock_init(&rq->lock);
  6567. rq->nr_running = 0;
  6568. rq->calc_load_active = 0;
  6569. rq->calc_load_update = jiffies + LOAD_FREQ;
  6570. init_cfs_rq(&rq->cfs, rq);
  6571. init_rt_rq(&rq->rt, rq);
  6572. #ifdef CONFIG_FAIR_GROUP_SCHED
  6573. init_task_group.shares = init_task_group_load;
  6574. INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
  6575. #ifdef CONFIG_CGROUP_SCHED
  6576. /*
  6577. * How much cpu bandwidth does init_task_group get?
  6578. *
6579. * In the case of task-groups formed through the cgroup filesystem, it
  6580. * gets 100% of the cpu resources in the system. This overall
  6581. * system cpu resource is divided among the tasks of
  6582. * init_task_group and its child task-groups in a fair manner,
  6583. * based on each entity's (task or task-group's) weight
  6584. * (se->load.weight).
  6585. *
  6586. * In other words, if init_task_group has 10 tasks of weight
6587. * 1024 and two child groups A0 and A1 (of weight 1024 each),
  6588. * then A0's share of the cpu resource is:
  6589. *
  6590. * A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
  6591. *
  6592. * We achieve this by letting init_task_group's tasks sit
  6593. * directly in rq->cfs (i.e init_task_group->se[] = NULL).
  6594. */
  6595. init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, 1, NULL);
  6596. #endif
  6597. #endif /* CONFIG_FAIR_GROUP_SCHED */
  6598. rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
  6599. #ifdef CONFIG_RT_GROUP_SCHED
  6600. INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
  6601. #ifdef CONFIG_CGROUP_SCHED
  6602. init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, 1, NULL);
  6603. #endif
  6604. #endif
  6605. for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
  6606. rq->cpu_load[j] = 0;
  6607. #ifdef CONFIG_SMP
  6608. rq->sd = NULL;
  6609. rq->rd = NULL;
  6610. rq->post_schedule = 0;
  6611. rq->active_balance = 0;
  6612. rq->next_balance = jiffies;
  6613. rq->push_cpu = 0;
  6614. rq->cpu = i;
  6615. rq->online = 0;
  6616. rq->migration_thread = NULL;
  6617. rq->idle_stamp = 0;
  6618. rq->avg_idle = 2*sysctl_sched_migration_cost;
  6619. INIT_LIST_HEAD(&rq->migration_queue);
  6620. rq_attach_root(rq, &def_root_domain);
  6621. #endif
  6622. init_rq_hrtick(rq);
  6623. atomic_set(&rq->nr_iowait, 0);
  6624. }
  6625. set_load_weight(&init_task);
  6626. #ifdef CONFIG_PREEMPT_NOTIFIERS
  6627. INIT_HLIST_HEAD(&init_task.preempt_notifiers);
  6628. #endif
  6629. #ifdef CONFIG_SMP
  6630. open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
  6631. #endif
  6632. #ifdef CONFIG_RT_MUTEXES
  6633. plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
  6634. #endif
  6635. /*
  6636. * The boot idle thread does lazy MMU switching as well:
  6637. */
  6638. atomic_inc(&init_mm.mm_count);
  6639. enter_lazy_tlb(&init_mm, current);
  6640. /*
  6641. * Make us the idle thread. Technically, schedule() should not be
  6642. * called from this thread, however somewhere below it might be,
  6643. * but because we are the idle thread, we just pick up running again
  6644. * when this runqueue becomes "idle".
  6645. */
  6646. init_idle(current, smp_processor_id());
  6647. calc_load_update = jiffies + LOAD_FREQ;
  6648. /*
  6649. * During early bootup we pretend to be a normal task:
  6650. */
  6651. current->sched_class = &fair_sched_class;
  6652. /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
  6653. zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
  6654. #ifdef CONFIG_SMP
  6655. #ifdef CONFIG_NO_HZ
  6656. zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
  6657. alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
  6658. #endif
  6659. /* May be allocated at isolcpus cmdline parse time */
  6660. if (cpu_isolated_map == NULL)
  6661. zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
  6662. #endif /* SMP */
  6663. perf_event_init();
  6664. scheduler_running = 1;
  6665. }
  6666. #ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
  6667. static inline int preempt_count_equals(int preempt_offset)
  6668. {
  6669. int nested = (preempt_count() & ~PREEMPT_ACTIVE) + rcu_preempt_depth();
  6670. return (nested == PREEMPT_INATOMIC_BASE + preempt_offset);
  6671. }
  6672. void __might_sleep(const char *file, int line, int preempt_offset)
  6673. {
  6674. #ifdef in_atomic
  6675. static unsigned long prev_jiffy; /* ratelimiting */
  6676. if ((preempt_count_equals(preempt_offset) && !irqs_disabled()) ||
  6677. system_state != SYSTEM_RUNNING || oops_in_progress)
  6678. return;
  6679. if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
  6680. return;
  6681. prev_jiffy = jiffies;
  6682. printk(KERN_ERR
  6683. "BUG: sleeping function called from invalid context at %s:%d\n",
  6684. file, line);
  6685. printk(KERN_ERR
  6686. "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
  6687. in_atomic(), irqs_disabled(),
  6688. current->pid, current->comm);
  6689. debug_show_held_locks(current);
  6690. if (irqs_disabled())
  6691. print_irqtrace_events(current);
  6692. dump_stack();
  6693. #endif
  6694. }
  6695. EXPORT_SYMBOL(__might_sleep);
  6696. #endif
  6697. #ifdef CONFIG_MAGIC_SYSRQ
  6698. static void normalize_task(struct rq *rq, struct task_struct *p)
  6699. {
  6700. int on_rq;
  6701. update_rq_clock(rq);
  6702. on_rq = p->se.on_rq;
  6703. if (on_rq)
  6704. deactivate_task(rq, p, 0);
  6705. __setscheduler(rq, p, SCHED_NORMAL, 0);
  6706. if (on_rq) {
  6707. activate_task(rq, p, 0);
  6708. resched_task(rq->curr);
  6709. }
  6710. }
  6711. void normalize_rt_tasks(void)
  6712. {
  6713. struct task_struct *g, *p;
  6714. unsigned long flags;
  6715. struct rq *rq;
  6716. read_lock_irqsave(&tasklist_lock, flags);
  6717. do_each_thread(g, p) {
  6718. /*
  6719. * Only normalize user tasks:
  6720. */
  6721. if (!p->mm)
  6722. continue;
  6723. p->se.exec_start = 0;
  6724. #ifdef CONFIG_SCHEDSTATS
  6725. p->se.wait_start = 0;
  6726. p->se.sleep_start = 0;
  6727. p->se.block_start = 0;
  6728. #endif
  6729. if (!rt_task(p)) {
  6730. /*
  6731. * Renice negative nice level userspace
  6732. * tasks back to 0:
  6733. */
  6734. if (TASK_NICE(p) < 0 && p->mm)
  6735. set_user_nice(p, 0);
  6736. continue;
  6737. }
  6738. raw_spin_lock(&p->pi_lock);
  6739. rq = __task_rq_lock(p);
  6740. normalize_task(rq, p);
  6741. __task_rq_unlock(rq);
  6742. raw_spin_unlock(&p->pi_lock);
  6743. } while_each_thread(g, p);
  6744. read_unlock_irqrestore(&tasklist_lock, flags);
  6745. }
  6746. #endif /* CONFIG_MAGIC_SYSRQ */
  6747. #ifdef CONFIG_IA64
  6748. /*
  6749. * These functions are only useful for the IA64 MCA handling.
  6750. *
  6751. * They can only be called when the whole system has been
  6752. * stopped - every CPU needs to be quiescent, and no scheduling
  6753. * activity can take place. Using them for anything else would
  6754. * be a serious bug, and as a result, they aren't even visible
  6755. * under any other configuration.
  6756. */
  6757. /**
  6758. * curr_task - return the current task for a given cpu.
  6759. * @cpu: the processor in question.
  6760. *
  6761. * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
  6762. */
  6763. struct task_struct *curr_task(int cpu)
  6764. {
  6765. return cpu_curr(cpu);
  6766. }
  6767. /**
  6768. * set_curr_task - set the current task for a given cpu.
  6769. * @cpu: the processor in question.
  6770. * @p: the task pointer to set.
  6771. *
  6772. * Description: This function must only be used when non-maskable interrupts
  6773. * are serviced on a separate stack. It allows the architecture to switch the
  6774. * notion of the current task on a cpu in a non-blocking manner. This function
6775. * must be called with all CPUs synchronized and interrupts disabled; the
6776. * caller must save the original value of the current task (see
6777. * curr_task() above) and restore that value before re-enabling interrupts and
6778. * restarting the system.
  6779. *
  6780. * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED!
  6781. */
  6782. void set_curr_task(int cpu, struct task_struct *p)
  6783. {
  6784. cpu_curr(cpu) = p;
  6785. }
  6786. #endif
  6787. #ifdef CONFIG_FAIR_GROUP_SCHED
  6788. static void free_fair_sched_group(struct task_group *tg)
  6789. {
  6790. int i;
  6791. for_each_possible_cpu(i) {
  6792. if (tg->cfs_rq)
  6793. kfree(tg->cfs_rq[i]);
  6794. if (tg->se)
  6795. kfree(tg->se[i]);
  6796. }
  6797. kfree(tg->cfs_rq);
  6798. kfree(tg->se);
  6799. }
  6800. static
  6801. int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
  6802. {
  6803. struct cfs_rq *cfs_rq;
  6804. struct sched_entity *se;
  6805. struct rq *rq;
  6806. int i;
  6807. tg->cfs_rq = kzalloc(sizeof(cfs_rq) * nr_cpu_ids, GFP_KERNEL);
  6808. if (!tg->cfs_rq)
  6809. goto err;
  6810. tg->se = kzalloc(sizeof(se) * nr_cpu_ids, GFP_KERNEL);
  6811. if (!tg->se)
  6812. goto err;
  6813. tg->shares = NICE_0_LOAD;
  6814. for_each_possible_cpu(i) {
  6815. rq = cpu_rq(i);
  6816. cfs_rq = kzalloc_node(sizeof(struct cfs_rq),
  6817. GFP_KERNEL, cpu_to_node(i));
  6818. if (!cfs_rq)
  6819. goto err;
  6820. se = kzalloc_node(sizeof(struct sched_entity),
  6821. GFP_KERNEL, cpu_to_node(i));
  6822. if (!se)
  6823. goto err_free_rq;
  6824. init_tg_cfs_entry(tg, cfs_rq, se, i, 0, parent->se[i]);
  6825. }
  6826. return 1;
  6827. err_free_rq:
  6828. kfree(cfs_rq);
  6829. err:
  6830. return 0;
  6831. }
  6832. static inline void register_fair_sched_group(struct task_group *tg, int cpu)
  6833. {
  6834. list_add_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list,
  6835. &cpu_rq(cpu)->leaf_cfs_rq_list);
  6836. }
  6837. static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
  6838. {
  6839. list_del_rcu(&tg->cfs_rq[cpu]->leaf_cfs_rq_list);
  6840. }
6841. #else /* !CONFIG_FAIR_GROUP_SCHED */
  6842. static inline void free_fair_sched_group(struct task_group *tg)
  6843. {
  6844. }
  6845. static inline
  6846. int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
  6847. {
  6848. return 1;
  6849. }
  6850. static inline void register_fair_sched_group(struct task_group *tg, int cpu)
  6851. {
  6852. }
  6853. static inline void unregister_fair_sched_group(struct task_group *tg, int cpu)
  6854. {
  6855. }
  6856. #endif /* CONFIG_FAIR_GROUP_SCHED */
  6857. #ifdef CONFIG_RT_GROUP_SCHED
  6858. static void free_rt_sched_group(struct task_group *tg)
  6859. {
  6860. int i;
  6861. destroy_rt_bandwidth(&tg->rt_bandwidth);
  6862. for_each_possible_cpu(i) {
  6863. if (tg->rt_rq)
  6864. kfree(tg->rt_rq[i]);
  6865. if (tg->rt_se)
  6866. kfree(tg->rt_se[i]);
  6867. }
  6868. kfree(tg->rt_rq);
  6869. kfree(tg->rt_se);
  6870. }
  6871. static
  6872. int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
  6873. {
  6874. struct rt_rq *rt_rq;
  6875. struct sched_rt_entity *rt_se;
  6876. struct rq *rq;
  6877. int i;
  6878. tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
  6879. if (!tg->rt_rq)
  6880. goto err;
  6881. tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
  6882. if (!tg->rt_se)
  6883. goto err;
  6884. init_rt_bandwidth(&tg->rt_bandwidth,
  6885. ktime_to_ns(def_rt_bandwidth.rt_period), 0);
  6886. for_each_possible_cpu(i) {
  6887. rq = cpu_rq(i);
  6888. rt_rq = kzalloc_node(sizeof(struct rt_rq),
  6889. GFP_KERNEL, cpu_to_node(i));
  6890. if (!rt_rq)
  6891. goto err;
  6892. rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
  6893. GFP_KERNEL, cpu_to_node(i));
  6894. if (!rt_se)
  6895. goto err_free_rq;
  6896. init_tg_rt_entry(tg, rt_rq, rt_se, i, 0, parent->rt_se[i]);
  6897. }
  6898. return 1;
  6899. err_free_rq:
  6900. kfree(rt_rq);
  6901. err:
  6902. return 0;
  6903. }
  6904. static inline void register_rt_sched_group(struct task_group *tg, int cpu)
  6905. {
  6906. list_add_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list,
  6907. &cpu_rq(cpu)->leaf_rt_rq_list);
  6908. }
  6909. static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
  6910. {
  6911. list_del_rcu(&tg->rt_rq[cpu]->leaf_rt_rq_list);
  6912. }
  6913. #else /* !CONFIG_RT_GROUP_SCHED */
  6914. static inline void free_rt_sched_group(struct task_group *tg)
  6915. {
  6916. }
  6917. static inline
  6918. int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
  6919. {
  6920. return 1;
  6921. }
  6922. static inline void register_rt_sched_group(struct task_group *tg, int cpu)
  6923. {
  6924. }
  6925. static inline void unregister_rt_sched_group(struct task_group *tg, int cpu)
  6926. {
  6927. }
  6928. #endif /* CONFIG_RT_GROUP_SCHED */
  6929. #ifdef CONFIG_CGROUP_SCHED
  6930. static void free_sched_group(struct task_group *tg)
  6931. {
  6932. free_fair_sched_group(tg);
  6933. free_rt_sched_group(tg);
  6934. kfree(tg);
  6935. }
  6936. /* allocate runqueue etc for a new task group */
  6937. struct task_group *sched_create_group(struct task_group *parent)
  6938. {
  6939. struct task_group *tg;
  6940. unsigned long flags;
  6941. int i;
  6942. tg = kzalloc(sizeof(*tg), GFP_KERNEL);
  6943. if (!tg)
  6944. return ERR_PTR(-ENOMEM);
  6945. if (!alloc_fair_sched_group(tg, parent))
  6946. goto err;
  6947. if (!alloc_rt_sched_group(tg, parent))
  6948. goto err;
  6949. spin_lock_irqsave(&task_group_lock, flags);
  6950. for_each_possible_cpu(i) {
  6951. register_fair_sched_group(tg, i);
  6952. register_rt_sched_group(tg, i);
  6953. }
  6954. list_add_rcu(&tg->list, &task_groups);
  6955. WARN_ON(!parent); /* root should already exist */
  6956. tg->parent = parent;
  6957. INIT_LIST_HEAD(&tg->children);
  6958. list_add_rcu(&tg->siblings, &parent->children);
  6959. spin_unlock_irqrestore(&task_group_lock, flags);
  6960. return tg;
  6961. err:
  6962. free_sched_group(tg);
  6963. return ERR_PTR(-ENOMEM);
  6964. }
  6965. /* rcu callback to free various structures associated with a task group */
  6966. static void free_sched_group_rcu(struct rcu_head *rhp)
  6967. {
  6968. /* now it should be safe to free those cfs_rqs */
  6969. free_sched_group(container_of(rhp, struct task_group, rcu));
  6970. }
  6971. /* Destroy runqueue etc associated with a task group */
  6972. void sched_destroy_group(struct task_group *tg)
  6973. {
  6974. unsigned long flags;
  6975. int i;
  6976. spin_lock_irqsave(&task_group_lock, flags);
  6977. for_each_possible_cpu(i) {
  6978. unregister_fair_sched_group(tg, i);
  6979. unregister_rt_sched_group(tg, i);
  6980. }
  6981. list_del_rcu(&tg->list);
  6982. list_del_rcu(&tg->siblings);
  6983. spin_unlock_irqrestore(&task_group_lock, flags);
6984. /* wait for possible concurrent references to cfs_rqs to complete */
  6985. call_rcu(&tg->rcu, free_sched_group_rcu);
  6986. }
6987. /* Change a task's runqueue when it moves between groups.
  6988. * The caller of this function should have put the task in its new group
  6989. * by now. This function just updates tsk->se.cfs_rq and tsk->se.parent to
  6990. * reflect its new group.
  6991. */
  6992. void sched_move_task(struct task_struct *tsk)
  6993. {
  6994. int on_rq, running;
  6995. unsigned long flags;
  6996. struct rq *rq;
  6997. rq = task_rq_lock(tsk, &flags);
  6998. update_rq_clock(rq);
  6999. running = task_current(rq, tsk);
  7000. on_rq = tsk->se.on_rq;
  7001. if (on_rq)
  7002. dequeue_task(rq, tsk, 0);
  7003. if (unlikely(running))
  7004. tsk->sched_class->put_prev_task(rq, tsk);
  7005. set_task_rq(tsk, task_cpu(tsk));
  7006. #ifdef CONFIG_FAIR_GROUP_SCHED
  7007. if (tsk->sched_class->moved_group)
  7008. tsk->sched_class->moved_group(tsk, on_rq);
  7009. #endif
  7010. if (unlikely(running))
  7011. tsk->sched_class->set_curr_task(rq);
  7012. if (on_rq)
  7013. enqueue_task(rq, tsk, 0, false);
  7014. task_rq_unlock(rq, &flags);
  7015. }
  7016. #endif /* CONFIG_CGROUP_SCHED */
  7017. #ifdef CONFIG_FAIR_GROUP_SCHED
  7018. static void __set_se_shares(struct sched_entity *se, unsigned long shares)
  7019. {
  7020. struct cfs_rq *cfs_rq = se->cfs_rq;
  7021. int on_rq;
  7022. on_rq = se->on_rq;
  7023. if (on_rq)
  7024. dequeue_entity(cfs_rq, se, 0);
  7025. se->load.weight = shares;
  7026. se->load.inv_weight = 0;
  7027. if (on_rq)
  7028. enqueue_entity(cfs_rq, se, 0);
  7029. }
  7030. static void set_se_shares(struct sched_entity *se, unsigned long shares)
  7031. {
  7032. struct cfs_rq *cfs_rq = se->cfs_rq;
  7033. struct rq *rq = cfs_rq->rq;
  7034. unsigned long flags;
  7035. raw_spin_lock_irqsave(&rq->lock, flags);
  7036. __set_se_shares(se, shares);
  7037. raw_spin_unlock_irqrestore(&rq->lock, flags);
  7038. }
static DEFINE_MUTEX(shares_mutex);

int sched_group_set_shares(struct task_group *tg, unsigned long shares)
{
	int i;
	unsigned long flags;

	/*
	 * We can't change the weight of the root cgroup.
	 */
	if (!tg->se[0])
		return -EINVAL;

	if (shares < MIN_SHARES)
		shares = MIN_SHARES;
	else if (shares > MAX_SHARES)
		shares = MAX_SHARES;

	mutex_lock(&shares_mutex);
	if (tg->shares == shares)
		goto done;

	spin_lock_irqsave(&task_group_lock, flags);
	for_each_possible_cpu(i)
		unregister_fair_sched_group(tg, i);
	list_del_rcu(&tg->siblings);
	spin_unlock_irqrestore(&task_group_lock, flags);

	/* wait for any ongoing reference to this group to finish */
	synchronize_sched();

	/*
	 * Now we are free to modify the group's share on each cpu
	 * w/o tripping rebalance_share or load_balance_fair.
	 */
	tg->shares = shares;
	for_each_possible_cpu(i) {
		/*
		 * force a rebalance
		 */
		cfs_rq_set_shares(tg->cfs_rq[i], 0);
		set_se_shares(tg->se[i], shares);
	}

	/*
	 * Enable load balance activity on this group, by inserting it back on
	 * each cpu's rq->leaf_cfs_rq_list.
	 */
	spin_lock_irqsave(&task_group_lock, flags);
	for_each_possible_cpu(i)
		register_fair_sched_group(tg, i);
	list_add_rcu(&tg->siblings, &tg->parent->children);
	spin_unlock_irqrestore(&task_group_lock, flags);
done:
	mutex_unlock(&shares_mutex);
	return 0;
}

unsigned long sched_group_shares(struct task_group *tg)
{
	return tg->shares;
}
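/*
 * Illustrative semantics (not part of the original source): shares are a
 * relative weight, not an absolute allocation. Under full contention a group
 * with shares == 2048 receives roughly twice the CPU time of a sibling group
 * with shares == 1024; values written outside [MIN_SHARES, MAX_SHARES] are
 * clamped by sched_group_set_shares() above.
 */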
#endif

#ifdef CONFIG_RT_GROUP_SCHED
/*
 * Ensure that the real time constraints are schedulable.
 */
static DEFINE_MUTEX(rt_constraints_mutex);

static unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;

	return div64_u64(runtime << 20, period);
}
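/*
 * Illustrative arithmetic for to_ratio() (not part of the original source):
 * the ratio is expressed in 20-bit fixed point, so with the common defaults
 * of a 1 s period and 0.95 s runtime,
 *
 *	to_ratio(1000000000, 950000000) = (950000000 << 20) / 1000000000
 *					~= 996147, i.e. roughly 0.95 * 2^20.
 *
 * RUNTIME_INF maps to exactly 1 << 20, i.e. a ratio of 1.0.
 */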
/* Must be called with tasklist_lock held */
static inline int tg_has_rt_tasks(struct task_group *tg)
{
	struct task_struct *g, *p;

	do_each_thread(g, p) {
		if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
			return 1;
	} while_each_thread(g, p);

	return 0;
}

struct rt_schedulable_data {
	struct task_group *tg;
	u64 rt_period;
	u64 rt_runtime;
};
static int tg_schedulable(struct task_group *tg, void *data)
{
	struct rt_schedulable_data *d = data;
	struct task_group *child;
	unsigned long total, sum = 0;
	u64 period, runtime;

	period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	runtime = tg->rt_bandwidth.rt_runtime;

	if (tg == d->tg) {
		period = d->rt_period;
		runtime = d->rt_runtime;
	}

	/*
	 * Cannot have more runtime than the period.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	/*
	 * Ensure we don't starve existing RT tasks.
	 */
	if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
		return -EBUSY;

	total = to_ratio(period, runtime);

	/*
	 * Nobody can have more than the global setting allows.
	 */
	if (total > to_ratio(global_rt_period(), global_rt_runtime()))
		return -EINVAL;

	/*
	 * The sum of our children's runtime should not exceed our own.
	 */
	list_for_each_entry_rcu(child, &tg->children, siblings) {
		period = ktime_to_ns(child->rt_bandwidth.rt_period);
		runtime = child->rt_bandwidth.rt_runtime;

		if (child == d->tg) {
			period = d->rt_period;
			runtime = d->rt_runtime;
		}

		sum += to_ratio(period, runtime);
	}

	if (sum > total)
		return -EINVAL;

	return 0;
}

static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
{
	struct rt_schedulable_data data = {
		.tg = tg,
		.rt_period = period,
		.rt_runtime = runtime,
	};

	return walk_tg_tree(tg_schedulable, tg_nop, &data);
}
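/*
 * Illustrative worked example for the hierarchical check above (not part of
 * the original source): suppose a group has a 1 s period and 400 ms runtime,
 * so total = to_ratio(1000000000, 400000000) = 419430 (~0.4). Two children
 * each asking for 250 ms per 1 s contribute to_ratio() = 262144 each, giving
 * sum = 524288 > total, so tg_schedulable() returns -EINVAL; the children's
 * combined bandwidth may never exceed the parent's own allocation.
 */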
static int tg_set_bandwidth(struct task_group *tg,
		u64 rt_period, u64 rt_runtime)
{
	int i, err = 0;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	err = __rt_schedulable(tg, rt_period, rt_runtime);
	if (err)
		goto unlock;

	raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
	tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
	tg->rt_bandwidth.rt_runtime = rt_runtime;

	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = tg->rt_rq[i];

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_runtime;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return err;
}
int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
{
	u64 rt_runtime, rt_period;

	rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
	rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
	if (rt_runtime_us < 0)
		rt_runtime = RUNTIME_INF;

	return tg_set_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_runtime(struct task_group *tg)
{
	u64 rt_runtime_us;

	if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
		return -1;

	rt_runtime_us = tg->rt_bandwidth.rt_runtime;
	do_div(rt_runtime_us, NSEC_PER_USEC);
	return rt_runtime_us;
}
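/*
 * Illustrative unit handling (not part of the original source): the group
 * interface works in microseconds while the bandwidth bookkeeping above is in
 * nanoseconds, so writing a runtime of 950000 us stores
 * 950000 * NSEC_PER_USEC = 950000000 ns, while any negative value stores
 * RUNTIME_INF, which reads back as -1.
 */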
int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
{
	u64 rt_runtime, rt_period;

	rt_period = (u64)rt_period_us * NSEC_PER_USEC;
	rt_runtime = tg->rt_bandwidth.rt_runtime;

	if (rt_period == 0)
		return -EINVAL;

	return tg_set_bandwidth(tg, rt_period, rt_runtime);
}

long sched_group_rt_period(struct task_group *tg)
{
	u64 rt_period_us;

	rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
	do_div(rt_period_us, NSEC_PER_USEC);
	return rt_period_us;
}
static int sched_rt_global_constraints(void)
{
	u64 runtime, period;
	int ret = 0;

	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	runtime = global_rt_runtime();
	period = global_rt_period();

	/*
	 * Sanity check on the sysctl variables.
	 */
	if (runtime > period && runtime != RUNTIME_INF)
		return -EINVAL;

	mutex_lock(&rt_constraints_mutex);
	read_lock(&tasklist_lock);
	ret = __rt_schedulable(NULL, 0, 0);
	read_unlock(&tasklist_lock);
	mutex_unlock(&rt_constraints_mutex);

	return ret;
}

int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
{
	/* Don't accept realtime tasks when there is no way for them to run */
	if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
		return 0;

	return 1;
}
#else /* !CONFIG_RT_GROUP_SCHED */
static int sched_rt_global_constraints(void)
{
	unsigned long flags;
	int i;

	if (sysctl_sched_rt_period <= 0)
		return -EINVAL;

	/*
	 * There are always some RT tasks in the root group
	 * -- migration, kstopmachine etc.
	 */
	if (sysctl_sched_rt_runtime == 0)
		return -EBUSY;

	raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
	for_each_possible_cpu(i) {
		struct rt_rq *rt_rq = &cpu_rq(i)->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = global_rt_runtime();
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
	raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

	return 0;
}
#endif /* CONFIG_RT_GROUP_SCHED */
int sched_rt_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;
	int old_period, old_runtime;
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);
	old_period = sysctl_sched_rt_period;
	old_runtime = sysctl_sched_rt_runtime;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (!ret && write) {
		ret = sched_rt_global_constraints();
		if (ret) {
			sysctl_sched_rt_period = old_period;
			sysctl_sched_rt_runtime = old_runtime;
		} else {
			def_rt_bandwidth.rt_runtime = global_rt_runtime();
			def_rt_bandwidth.rt_period =
				ns_to_ktime(global_rt_period());
		}
	}
	mutex_unlock(&mutex);

	return ret;
}
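/*
 * Illustrative usage (not part of the original source): this handler backs
 * the kernel.sched_rt_period_us and kernel.sched_rt_runtime_us sysctls, so
 * e.g.
 *
 *	# echo 900000 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * is validated by sched_rt_global_constraints() and, on failure, the old
 * values are restored before returning the error to the writer.
 */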
#ifdef CONFIG_CGROUP_SCHED

/* return corresponding task_group object of a cgroup */
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpu_cgroup_subsys_id),
			    struct task_group, css);
}

static struct cgroup_subsys_state *
cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct task_group *tg, *parent;

	if (!cgrp->parent) {
		/* This is early initialization for the top cgroup */
		return &init_task_group.css;
	}

	parent = cgroup_tg(cgrp->parent);
	tg = sched_create_group(parent);
	if (IS_ERR(tg))
		return ERR_PTR(-ENOMEM);

	return &tg->css;
}

static void
cpu_cgroup_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct task_group *tg = cgroup_tg(cgrp);

	sched_destroy_group(tg);
}
static int
cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
#ifdef CONFIG_RT_GROUP_SCHED
	if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
		return -EINVAL;
#else
	/* We don't support RT-tasks being in separate groups */
	if (tsk->sched_class != &fair_sched_class)
		return -EINVAL;
#endif
	return 0;
}

static int
cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
		      struct task_struct *tsk, bool threadgroup)
{
	int retval = cpu_cgroup_can_attach_task(cgrp, tsk);
	if (retval)
		return retval;
	if (threadgroup) {
		struct task_struct *c;
		rcu_read_lock();
		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
			retval = cpu_cgroup_can_attach_task(cgrp, c);
			if (retval) {
				rcu_read_unlock();
				return retval;
			}
		}
		rcu_read_unlock();
	}
	return 0;
}

static void
cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
		  struct cgroup *old_cont, struct task_struct *tsk,
		  bool threadgroup)
{
	sched_move_task(tsk);
	if (threadgroup) {
		struct task_struct *c;
		rcu_read_lock();
		list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) {
			sched_move_task(c);
		}
		rcu_read_unlock();
	}
}
#ifdef CONFIG_FAIR_GROUP_SCHED
static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype,
				u64 shareval)
{
	return sched_group_set_shares(cgroup_tg(cgrp), shareval);
}

static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct task_group *tg = cgroup_tg(cgrp);

	return (u64) tg->shares;
}
#endif /* CONFIG_FAIR_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED
static int cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft,
				s64 val)
{
	return sched_group_set_rt_runtime(cgroup_tg(cgrp), val);
}

static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_runtime(cgroup_tg(cgrp));
}

static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype,
		u64 rt_period_us)
{
	return sched_group_set_rt_period(cgroup_tg(cgrp), rt_period_us);
}

static u64 cpu_rt_period_read_uint(struct cgroup *cgrp, struct cftype *cft)
{
	return sched_group_rt_period(cgroup_tg(cgrp));
}
#endif /* CONFIG_RT_GROUP_SCHED */
static struct cftype cpu_files[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	{
		.name = "shares",
		.read_u64 = cpu_shares_read_u64,
		.write_u64 = cpu_shares_write_u64,
	},
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	{
		.name = "rt_runtime_us",
		.read_s64 = cpu_rt_runtime_read,
		.write_s64 = cpu_rt_runtime_write,
	},
	{
		.name = "rt_period_us",
		.read_u64 = cpu_rt_period_read_uint,
		.write_u64 = cpu_rt_period_write_uint,
	},
#endif
};

static int cpu_cgroup_populate(struct cgroup_subsys *ss, struct cgroup *cont)
{
	return cgroup_add_files(cont, ss, cpu_files, ARRAY_SIZE(cpu_files));
}

struct cgroup_subsys cpu_cgroup_subsys = {
	.name		= "cpu",
	.create		= cpu_cgroup_create,
	.destroy	= cpu_cgroup_destroy,
	.can_attach	= cpu_cgroup_can_attach,
	.attach		= cpu_cgroup_attach,
	.populate	= cpu_cgroup_populate,
	.subsys_id	= cpu_cgroup_subsys_id,
	.early_init	= 1,
};
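/*
 * Illustrative usage from userspace (not part of the original source): with
 * the "cpu" controller mounted, the cftype entries above appear as per-group
 * files, e.g.
 *
 *	# mount -t cgroup -o cpu none /dev/cpuctl
 *	# mkdir /dev/cpuctl/low
 *	# echo 512    > /dev/cpuctl/low/cpu.shares
 *	# echo 500000 > /dev/cpuctl/low/cpu.rt_runtime_us
 *	# echo $$     > /dev/cpuctl/low/tasks
 *
 * (the mount point and group name are arbitrary examples).
 */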
#endif /* CONFIG_CGROUP_SCHED */

#ifdef CONFIG_CGROUP_CPUACCT

/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */

/* track cpu usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state css;
	/* cpuusage holds pointer to a u64-type object on every cpu */
	u64 *cpuusage;
	struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
	struct cpuacct *parent;
};

struct cgroup_subsys cpuacct_subsys;

/* return cpu accounting group corresponding to this container */
static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp)
{
	return container_of(cgroup_subsys_state(cgrp, cpuacct_subsys_id),
			    struct cpuacct, css);
}

/* return cpu accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, cpuacct_subsys_id),
			    struct cpuacct, css);
}
/* create a new cpu accounting group */
static struct cgroup_subsys_state *cpuacct_create(
	struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct cpuacct *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	int i;

	if (!ca)
		goto out;

	ca->cpuusage = alloc_percpu(u64);
	if (!ca->cpuusage)
		goto out_free_ca;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
		if (percpu_counter_init(&ca->cpustat[i], 0))
			goto out_free_counters;

	if (cgrp->parent)
		ca->parent = cgroup_ca(cgrp->parent);

	return &ca->css;

out_free_counters:
	while (--i >= 0)
		percpu_counter_destroy(&ca->cpustat[i]);
	free_percpu(ca->cpuusage);
out_free_ca:
	kfree(ca);
out:
	return ERR_PTR(-ENOMEM);
}
/* destroy an existing cpu accounting group */
static void
cpuacct_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int i;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
		percpu_counter_destroy(&ca->cpustat[i]);
	free_percpu(ca->cpuusage);
	kfree(ca);
}
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	u64 data;

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	data = *cpuusage;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	data = *cpuusage;
#endif

	return data;
}

static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
	u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
	*cpuusage = val;
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
	*cpuusage = val;
#endif
}
/* return total cpu usage (in nanoseconds) of a group */
static u64 cpuusage_read(struct cgroup *cgrp, struct cftype *cft)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	u64 totalcpuusage = 0;
	int i;

	for_each_present_cpu(i)
		totalcpuusage += cpuacct_cpuusage_read(ca, i);

	return totalcpuusage;
}

static int cpuusage_write(struct cgroup *cgrp, struct cftype *cftype,
			  u64 reset)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int err = 0;
	int i;

	if (reset) {
		err = -EINVAL;
		goto out;
	}

	for_each_present_cpu(i)
		cpuacct_cpuusage_write(ca, i, 0);

out:
	return err;
}
static int cpuacct_percpu_seq_read(struct cgroup *cgroup, struct cftype *cft,
				   struct seq_file *m)
{
	struct cpuacct *ca = cgroup_ca(cgroup);
	u64 percpu;
	int i;

	for_each_present_cpu(i) {
		percpu = cpuacct_cpuusage_read(ca, i);
		seq_printf(m, "%llu ", (unsigned long long) percpu);
	}
	seq_printf(m, "\n");
	return 0;
}

static const char *cpuacct_stat_desc[] = {
	[CPUACCT_STAT_USER] = "user",
	[CPUACCT_STAT_SYSTEM] = "system",
};

static int cpuacct_stats_show(struct cgroup *cgrp, struct cftype *cft,
			      struct cgroup_map_cb *cb)
{
	struct cpuacct *ca = cgroup_ca(cgrp);
	int i;

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++) {
		s64 val = percpu_counter_read(&ca->cpustat[i]);
		val = cputime64_to_clock_t(val);
		cb->fill(cb, cpuacct_stat_desc[i], val);
	}
	return 0;
}
static struct cftype files[] = {
	{
		.name = "usage",
		.read_u64 = cpuusage_read,
		.write_u64 = cpuusage_write,
	},
	{
		.name = "usage_percpu",
		.read_seq_string = cpuacct_percpu_seq_read,
	},
	{
		.name = "stat",
		.read_map = cpuacct_stats_show,
	},
};

static int cpuacct_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
{
	return cgroup_add_files(cgrp, ss, files, ARRAY_SIZE(files));
}
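/*
 * Illustrative view from userspace (not part of the original source): with
 * the "cpuacct" controller mounted, the entries above show up per group as
 *
 *	cpuacct.usage		- total usage in nanoseconds (write 0 to reset)
 *	cpuacct.usage_percpu	- one nanosecond counter per present CPU
 *	cpuacct.stat		- "user" and "system" time in clock_t ticks
 */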
/*
 * charge this task's execution time to its accounting group.
 *
 * called with rq->lock held.
 */
static void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
	struct cpuacct *ca;
	int cpu;

	if (unlikely(!cpuacct_subsys.active))
		return;

	cpu = task_cpu(tsk);

	rcu_read_lock();

	ca = task_ca(tsk);

	for (; ca; ca = ca->parent) {
		u64 *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
		*cpuusage += cputime;
	}

	rcu_read_unlock();
}
/*
 * When CONFIG_VIRT_CPU_ACCOUNTING is enabled one jiffy can be very large
 * in cputime_t units. As a result, cpuacct_update_stats calls
 * percpu_counter_add with values large enough to always overflow the
 * per cpu batch limit causing bad SMP scalability.
 *
 * To fix this we scale percpu_counter_batch by cputime_one_jiffy so we
 * batch the same amount of time with CONFIG_VIRT_CPU_ACCOUNTING disabled
 * and enabled. We cap it at INT_MAX which is the largest allowed batch value.
 */
#ifdef CONFIG_SMP
#define CPUACCT_BATCH	\
	min_t(long, percpu_counter_batch * cputime_one_jiffy, INT_MAX)
#else
#define CPUACCT_BATCH	0
#endif
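/*
 * Illustrative arithmetic (not part of the original source): if
 * percpu_counter_batch is 32 and cputime_one_jiffy is 1000000 (one jiffy in a
 * fine-grained cputime_t unit), the batch becomes
 * min(32 * 1000000, INT_MAX) = 32000000, so per-cpu deltas are still folded
 * into the global counter roughly once per 32 jiffies worth of time.
 */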
/*
 * Charge the system/user time to the task's accounting group.
 */
static void cpuacct_update_stats(struct task_struct *tsk,
		enum cpuacct_stat_index idx, cputime_t val)
{
	struct cpuacct *ca;
	int batch = CPUACCT_BATCH;

	if (unlikely(!cpuacct_subsys.active))
		return;

	rcu_read_lock();
	ca = task_ca(tsk);

	do {
		__percpu_counter_add(&ca->cpustat[idx], val, batch);
		ca = ca->parent;
	} while (ca);
	rcu_read_unlock();
}

struct cgroup_subsys cpuacct_subsys = {
	.name		= "cpuacct",
	.create		= cpuacct_create,
	.destroy	= cpuacct_destroy,
	.populate	= cpuacct_populate,
	.subsys_id	= cpuacct_subsys_id,
};
#endif	/* CONFIG_CGROUP_CPUACCT */

#ifndef CONFIG_SMP

int rcu_expedited_torture_stats(char *page)
{
	return 0;
}
EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);

void synchronize_sched_expedited(void)
{
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
#else /* #ifndef CONFIG_SMP */

static DEFINE_PER_CPU(struct migration_req, rcu_migration_req);
static DEFINE_MUTEX(rcu_sched_expedited_mutex);

#define RCU_EXPEDITED_STATE_POST	-2
#define RCU_EXPEDITED_STATE_IDLE	-1

static int rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;

int rcu_expedited_torture_stats(char *page)
{
	int cnt = 0;
	int cpu;

	cnt += sprintf(&page[cnt], "state: %d /", rcu_expedited_state);
	for_each_online_cpu(cpu) {
		cnt += sprintf(&page[cnt], " %d:%d",
			       cpu, per_cpu(rcu_migration_req, cpu).dest_cpu);
	}
	cnt += sprintf(&page[cnt], "\n");
	return cnt;
}
EXPORT_SYMBOL_GPL(rcu_expedited_torture_stats);

static long synchronize_sched_expedited_count;
/*
 * Wait for an rcu-sched grace period to elapse, but use a "big hammer"
 * approach to force the grace period to end quickly. This consumes
 * significant time on all CPUs and is thus not recommended for
 * any sort of common-case code.
 *
 * Note that it is illegal to call this function while holding any
 * lock that is acquired by a CPU-hotplug notifier. Failing to
 * observe this restriction will result in deadlock.
 */
void synchronize_sched_expedited(void)
{
	int cpu;
	unsigned long flags;
	bool need_full_sync = 0;
	struct rq *rq;
	struct migration_req *req;
	long snap;
	int trycount = 0;

	smp_mb();  /* ensure prior mod happens before capturing snap. */
	snap = ACCESS_ONCE(synchronize_sched_expedited_count) + 1;
	get_online_cpus();
	while (!mutex_trylock(&rcu_sched_expedited_mutex)) {
		put_online_cpus();
		if (trycount++ < 10)
			udelay(trycount * num_online_cpus());
		else {
			synchronize_sched();
			return;
		}
		if (ACCESS_ONCE(synchronize_sched_expedited_count) - snap > 0) {
			smp_mb(); /* ensure test happens before caller kfree */
			return;
		}
		get_online_cpus();
	}
	rcu_expedited_state = RCU_EXPEDITED_STATE_POST;
	for_each_online_cpu(cpu) {
		rq = cpu_rq(cpu);
		req = &per_cpu(rcu_migration_req, cpu);
		init_completion(&req->done);
		req->task = NULL;
		req->dest_cpu = RCU_MIGRATION_NEED_QS;
		raw_spin_lock_irqsave(&rq->lock, flags);
		list_add(&req->list, &rq->migration_queue);
		raw_spin_unlock_irqrestore(&rq->lock, flags);
		wake_up_process(rq->migration_thread);
	}
	for_each_online_cpu(cpu) {
		rcu_expedited_state = cpu;
		req = &per_cpu(rcu_migration_req, cpu);
		rq = cpu_rq(cpu);
		wait_for_completion(&req->done);
		raw_spin_lock_irqsave(&rq->lock, flags);
		if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
			need_full_sync = 1;
		req->dest_cpu = RCU_MIGRATION_IDLE;
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}
	rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
	synchronize_sched_expedited_count++;
	mutex_unlock(&rcu_sched_expedited_mutex);
	put_online_cpus();
	if (need_full_sync)
		synchronize_sched();
}
EXPORT_SYMBOL_GPL(synchronize_sched_expedited);

#endif /* #else #ifndef CONFIG_SMP */