  1. /*
  2. * linux/mm/page_alloc.c
  3. *
  4. * Manages the free list, the system allocates free pages here.
  5. * Note that kmalloc() lives in slab.c
  6. *
  7. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  8. * Swap reorganised 29.12.95, Stephen Tweedie
  9. * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  10. * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  11. * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  12. * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  13. * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  14. * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  15. */
  16. #include <linux/stddef.h>
  17. #include <linux/mm.h>
  18. #include <linux/swap.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/pagemap.h>
  21. #include <linux/jiffies.h>
  22. #include <linux/bootmem.h>
  23. #include <linux/memblock.h>
  24. #include <linux/compiler.h>
  25. #include <linux/kernel.h>
  26. #include <linux/kmemcheck.h>
  27. #include <linux/module.h>
  28. #include <linux/suspend.h>
  29. #include <linux/pagevec.h>
  30. #include <linux/blkdev.h>
  31. #include <linux/slab.h>
  32. #include <linux/ratelimit.h>
  33. #include <linux/oom.h>
  34. #include <linux/notifier.h>
  35. #include <linux/topology.h>
  36. #include <linux/sysctl.h>
  37. #include <linux/cpu.h>
  38. #include <linux/cpuset.h>
  39. #include <linux/memory_hotplug.h>
  40. #include <linux/nodemask.h>
  41. #include <linux/vmalloc.h>
  42. #include <linux/vmstat.h>
  43. #include <linux/mempolicy.h>
  44. #include <linux/stop_machine.h>
  45. #include <linux/sort.h>
  46. #include <linux/pfn.h>
  47. #include <linux/backing-dev.h>
  48. #include <linux/fault-inject.h>
  49. #include <linux/page-isolation.h>
  50. #include <linux/debugobjects.h>
  51. #include <linux/kmemleak.h>
  52. #include <linux/compaction.h>
  53. #include <trace/events/kmem.h>
  54. #include <linux/prefetch.h>
  55. #include <linux/mm_inline.h>
  56. #include <linux/migrate.h>
  57. #include <linux/page-debug-flags.h>
  58. #include <linux/hugetlb.h>
  59. #include <linux/sched/rt.h>
  60. #include <asm/sections.h>
  61. #include <asm/tlbflush.h>
  62. #include <asm/div64.h>
  63. #include "internal.h"
  64. /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
  65. static DEFINE_MUTEX(pcp_batch_high_lock);
  66. #define MIN_PERCPU_PAGELIST_FRACTION (8)
  67. #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
  68. DEFINE_PER_CPU(int, numa_node);
  69. EXPORT_PER_CPU_SYMBOL(numa_node);
  70. #endif
  71. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  72. /*
  73. * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
  74. * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
  75. * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
  76. * defined in <linux/topology.h>.
  77. */
  78. DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
  79. EXPORT_PER_CPU_SYMBOL(_numa_mem_);
  80. int _node_numa_mem_[MAX_NUMNODES];
  81. #endif
  82. /*
  83. * Array of node states.
  84. */
  85. nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
  86. [N_POSSIBLE] = NODE_MASK_ALL,
  87. [N_ONLINE] = { { [0] = 1UL } },
  88. #ifndef CONFIG_NUMA
  89. [N_NORMAL_MEMORY] = { { [0] = 1UL } },
  90. #ifdef CONFIG_HIGHMEM
  91. [N_HIGH_MEMORY] = { { [0] = 1UL } },
  92. #endif
  93. #ifdef CONFIG_MOVABLE_NODE
  94. [N_MEMORY] = { { [0] = 1UL } },
  95. #endif
  96. [N_CPU] = { { [0] = 1UL } },
  97. #endif /* NUMA */
  98. };
  99. EXPORT_SYMBOL(node_states);
  100. /* Protect totalram_pages and zone->managed_pages */
  101. static DEFINE_SPINLOCK(managed_page_count_lock);
  102. unsigned long totalram_pages __read_mostly;
  103. unsigned long totalreserve_pages __read_mostly;
  104. /*
  105. * When calculating the number of globally allowed dirty pages, there
  106. * is a certain number of per-zone reserves that should not be
  107. * considered dirtyable memory. This is the sum of those reserves
  108. * over all existing zones that contribute dirtyable memory.
  109. */
  110. unsigned long dirty_balance_reserve __read_mostly;
  111. int percpu_pagelist_fraction;
  112. gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
  113. #ifdef CONFIG_PM_SLEEP
  114. /*
  115. * The following functions are used by the suspend/hibernate code to temporarily
  116. * change gfp_allowed_mask in order to avoid using I/O during memory allocations
  117. * while devices are suspended. To avoid races with the suspend/hibernate code,
  118. * they should always be called with pm_mutex held (gfp_allowed_mask also should
  119. * only be modified with pm_mutex held, unless the suspend/hibernate code is
  120. * guaranteed not to run in parallel with that modification).
  121. */
  122. static gfp_t saved_gfp_mask;
  123. void pm_restore_gfp_mask(void)
  124. {
  125. WARN_ON(!mutex_is_locked(&pm_mutex));
  126. if (saved_gfp_mask) {
  127. gfp_allowed_mask = saved_gfp_mask;
  128. saved_gfp_mask = 0;
  129. }
  130. }
  131. void pm_restrict_gfp_mask(void)
  132. {
  133. WARN_ON(!mutex_is_locked(&pm_mutex));
  134. WARN_ON(saved_gfp_mask);
  135. saved_gfp_mask = gfp_allowed_mask;
  136. gfp_allowed_mask &= ~GFP_IOFS;
  137. }
  138. bool pm_suspended_storage(void)
  139. {
  140. if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
  141. return false;
  142. return true;
  143. }
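/*
 * Illustrative usage sketch, not part of the original file: the
 * suspend/hibernate core is expected to bracket the no-I/O window
 * roughly as follows (lock_system_sleep() takes pm_mutex):
 *
 *	lock_system_sleep();
 *	pm_restrict_gfp_mask();
 *	... allocate and write the hibernation image ...
 *	pm_restore_gfp_mask();
 *	unlock_system_sleep();
 *
 * so that allocations made while devices are suspended cannot recurse
 * into the block layer.
 */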
  144. #endif /* CONFIG_PM_SLEEP */
  145. #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
  146. int pageblock_order __read_mostly;
  147. #endif
  148. static void __free_pages_ok(struct page *page, unsigned int order);
  149. /*
  150. * results with 256, 32 in the lowmem_reserve sysctl:
  151. * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
  152. * 1G machine -> (16M dma, 784M normal, 224M high)
  153. * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
  154. * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
  155. * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
  156. *
  157. * TBD: should special case ZONE_DMA32 machines here - in those we normally
  158. * don't need any ZONE_NORMAL reservation
  159. */
  160. int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
  161. #ifdef CONFIG_ZONE_DMA
  162. 256,
  163. #endif
  164. #ifdef CONFIG_ZONE_DMA32
  165. 256,
  166. #endif
  167. #ifdef CONFIG_HIGHMEM
  168. 32,
  169. #endif
  170. 32,
  171. };
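/*
 * Worked example, illustrative and not part of the original file: on the
 * 1G machine above (16M dma, 784M normal, 224M high) with the default
 * ratios of 256 and 32,
 *
 *	a NORMAL allocation keeps 784M/256 ~= 3M of ZONE_DMA in reserve,
 *	a HIGHMEM allocation keeps 224M/32 = 7M of ZONE_NORMAL and
 *	(224M+784M)/256 ~= 4M of ZONE_DMA in reserve.
 *
 * Larger ratio values therefore mean smaller lowmem reserves.
 */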
  172. EXPORT_SYMBOL(totalram_pages);
  173. static char * const zone_names[MAX_NR_ZONES] = {
  174. #ifdef CONFIG_ZONE_DMA
  175. "DMA",
  176. #endif
  177. #ifdef CONFIG_ZONE_DMA32
  178. "DMA32",
  179. #endif
  180. "Normal",
  181. #ifdef CONFIG_HIGHMEM
  182. "HighMem",
  183. #endif
  184. "Movable",
  185. };
  186. int min_free_kbytes = 1024;
  187. int user_min_free_kbytes = -1;
  188. static unsigned long __meminitdata nr_kernel_pages;
  189. static unsigned long __meminitdata nr_all_pages;
  190. static unsigned long __meminitdata dma_reserve;
  191. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  192. static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  193. static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  194. static unsigned long __initdata required_kernelcore;
  195. static unsigned long __initdata required_movablecore;
  196. static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
  197. /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  198. int movable_zone;
  199. EXPORT_SYMBOL(movable_zone);
  200. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  201. #if MAX_NUMNODES > 1
  202. int nr_node_ids __read_mostly = MAX_NUMNODES;
  203. int nr_online_nodes __read_mostly = 1;
  204. EXPORT_SYMBOL(nr_node_ids);
  205. EXPORT_SYMBOL(nr_online_nodes);
  206. #endif
  207. int page_group_by_mobility_disabled __read_mostly;
  208. void set_pageblock_migratetype(struct page *page, int migratetype)
  209. {
  210. if (unlikely(page_group_by_mobility_disabled &&
  211. migratetype < MIGRATE_PCPTYPES))
  212. migratetype = MIGRATE_UNMOVABLE;
  213. set_pageblock_flags_group(page, (unsigned long)migratetype,
  214. PB_migrate, PB_migrate_end);
  215. }
  216. bool oom_killer_disabled __read_mostly;
  217. #ifdef CONFIG_DEBUG_VM
  218. static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
  219. {
  220. int ret = 0;
  221. unsigned seq;
  222. unsigned long pfn = page_to_pfn(page);
  223. unsigned long sp, start_pfn;
  224. do {
  225. seq = zone_span_seqbegin(zone);
  226. start_pfn = zone->zone_start_pfn;
  227. sp = zone->spanned_pages;
  228. if (!zone_spans_pfn(zone, pfn))
  229. ret = 1;
  230. } while (zone_span_seqretry(zone, seq));
  231. if (ret)
  232. pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
  233. pfn, zone_to_nid(zone), zone->name,
  234. start_pfn, start_pfn + sp);
  235. return ret;
  236. }
  237. static int page_is_consistent(struct zone *zone, struct page *page)
  238. {
  239. if (!pfn_valid_within(page_to_pfn(page)))
  240. return 0;
  241. if (zone != page_zone(page))
  242. return 0;
  243. return 1;
  244. }
  245. /*
  246. * Temporary debugging check for pages not lying within a given zone.
  247. */
  248. static int bad_range(struct zone *zone, struct page *page)
  249. {
  250. if (page_outside_zone_boundaries(zone, page))
  251. return 1;
  252. if (!page_is_consistent(zone, page))
  253. return 1;
  254. return 0;
  255. }
  256. #else
  257. static inline int bad_range(struct zone *zone, struct page *page)
  258. {
  259. return 0;
  260. }
  261. #endif
  262. static void bad_page(struct page *page, const char *reason,
  263. unsigned long bad_flags)
  264. {
  265. static unsigned long resume;
  266. static unsigned long nr_shown;
  267. static unsigned long nr_unshown;
  268. /* Don't complain about poisoned pages */
  269. if (PageHWPoison(page)) {
  270. page_mapcount_reset(page); /* remove PageBuddy */
  271. return;
  272. }
  273. /*
  274. * Allow a burst of 60 reports, then keep quiet for that minute;
  275. * or allow a steady drip of one report per second.
  276. */
  277. if (nr_shown == 60) {
  278. if (time_before(jiffies, resume)) {
  279. nr_unshown++;
  280. goto out;
  281. }
  282. if (nr_unshown) {
  283. printk(KERN_ALERT
  284. "BUG: Bad page state: %lu messages suppressed\n",
  285. nr_unshown);
  286. nr_unshown = 0;
  287. }
  288. nr_shown = 0;
  289. }
  290. if (nr_shown++ == 0)
  291. resume = jiffies + 60 * HZ;
  292. printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
  293. current->comm, page_to_pfn(page));
  294. dump_page_badflags(page, reason, bad_flags);
  295. print_modules();
  296. dump_stack();
  297. out:
  298. /* Leave bad fields for debug, except PageBuddy could make trouble */
  299. page_mapcount_reset(page); /* remove PageBuddy */
  300. add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
  301. }
  302. /*
  303. * Higher-order pages are called "compound pages". They are structured thusly:
  304. *
  305. * The first PAGE_SIZE page is called the "head page".
  306. *
  307. * The remaining PAGE_SIZE pages are called "tail pages".
  308. *
  309. * All pages have PG_compound set. All tail pages have their ->first_page
  310. * pointing at the head page.
  311. *
  312. * The first tail page's ->lru.next holds the address of the compound page's
  313. * put_page() function. Its ->lru.prev holds the order of allocation.
  314. * This usage means that zero-order pages may not be compound.
  315. */
  316. static void free_compound_page(struct page *page)
  317. {
  318. __free_pages_ok(page, compound_order(page));
  319. }
  320. void prep_compound_page(struct page *page, unsigned long order)
  321. {
  322. int i;
  323. int nr_pages = 1 << order;
  324. set_compound_page_dtor(page, free_compound_page);
  325. set_compound_order(page, order);
  326. __SetPageHead(page);
  327. for (i = 1; i < nr_pages; i++) {
  328. struct page *p = page + i;
  329. set_page_count(p, 0);
  330. p->first_page = page;
  331. /* Make sure p->first_page is always valid for PageTail() */
  332. smp_wmb();
  333. __SetPageTail(p);
  334. }
  335. }
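/*
 * Illustrative layout sketch, not part of the original file: after
 * prep_compound_page(page, 2) the four constituent pages look like
 *
 *	page[0]	head, PG_head set, compound_order() == 2
 *	page[1]	tail, first_page -> page[0],
 *		lru.next = free_compound_page (dtor), lru.prev = order
 *	page[2]	tail, first_page -> page[0]
 *	page[3]	tail, first_page -> page[0]
 *
 * matching the description in the comment above: the destructor and the
 * order live in the first tail page, and every tail points back at the head.
 */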
  336. /* update __split_huge_page_refcount if you change this function */
  337. static int destroy_compound_page(struct page *page, unsigned long order)
  338. {
  339. int i;
  340. int nr_pages = 1 << order;
  341. int bad = 0;
  342. if (unlikely(compound_order(page) != order)) {
  343. bad_page(page, "wrong compound order", 0);
  344. bad++;
  345. }
  346. __ClearPageHead(page);
  347. for (i = 1; i < nr_pages; i++) {
  348. struct page *p = page + i;
  349. if (unlikely(!PageTail(p))) {
  350. bad_page(page, "PageTail not set", 0);
  351. bad++;
  352. } else if (unlikely(p->first_page != page)) {
  353. bad_page(page, "first_page not consistent", 0);
  354. bad++;
  355. }
  356. __ClearPageTail(p);
  357. }
  358. return bad;
  359. }
  360. static inline void prep_zero_page(struct page *page, unsigned int order,
  361. gfp_t gfp_flags)
  362. {
  363. int i;
  364. /*
  365. * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
  366. * and __GFP_HIGHMEM from hard or soft interrupt context.
  367. */
  368. VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
  369. for (i = 0; i < (1 << order); i++)
  370. clear_highpage(page + i);
  371. }
  372. #ifdef CONFIG_DEBUG_PAGEALLOC
  373. unsigned int _debug_guardpage_minorder;
  374. static int __init debug_guardpage_minorder_setup(char *buf)
  375. {
  376. unsigned long res;
  377. if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
  378. printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
  379. return 0;
  380. }
  381. _debug_guardpage_minorder = res;
  382. printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
  383. return 0;
  384. }
  385. __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
  386. static inline void set_page_guard_flag(struct page *page)
  387. {
  388. __set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
  389. }
  390. static inline void clear_page_guard_flag(struct page *page)
  391. {
  392. __clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
  393. }
  394. #else
  395. static inline void set_page_guard_flag(struct page *page) { }
  396. static inline void clear_page_guard_flag(struct page *page) { }
  397. #endif
  398. static inline void set_page_order(struct page *page, unsigned int order)
  399. {
  400. set_page_private(page, order);
  401. __SetPageBuddy(page);
  402. }
  403. static inline void rmv_page_order(struct page *page)
  404. {
  405. __ClearPageBuddy(page);
  406. set_page_private(page, 0);
  407. }
  408. /*
  409. * This function checks whether a page is free && is the buddy.
  410. * We can coalesce a page and its buddy if
  411. * (a) the buddy is not in a hole &&
  412. * (b) the buddy is in the buddy system &&
  413. * (c) a page and its buddy have the same order &&
  414. * (d) a page and its buddy are in the same zone.
  415. *
  416. * For recording whether a page is in the buddy system, we set ->_mapcount
  417. * to PAGE_BUDDY_MAPCOUNT_VALUE.
  418. * Setting, clearing, and testing _mapcount against PAGE_BUDDY_MAPCOUNT_VALUE
  419. * is serialized by zone->lock.
  420. *
  421. * For recording page's order, we use page_private(page).
  422. */
  423. static inline int page_is_buddy(struct page *page, struct page *buddy,
  424. unsigned int order)
  425. {
  426. if (!pfn_valid_within(page_to_pfn(buddy)))
  427. return 0;
  428. if (page_is_guard(buddy) && page_order(buddy) == order) {
  429. VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
  430. if (page_zone_id(page) != page_zone_id(buddy))
  431. return 0;
  432. return 1;
  433. }
  434. if (PageBuddy(buddy) && page_order(buddy) == order) {
  435. VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
  436. /*
  437. * zone check is done late to avoid uselessly
  438. * calculating zone/node ids for pages that could
  439. * never merge.
  440. */
  441. if (page_zone_id(page) != page_zone_id(buddy))
  442. return 0;
  443. return 1;
  444. }
  445. return 0;
  446. }
  447. /*
  448. * Freeing function for a buddy system allocator.
  449. *
  450. * The concept of a buddy system is to maintain a direct-mapped table
  451. * (containing bit values) for memory blocks of various "orders".
  452. * The bottom level table contains the map for the smallest allocatable
  453. * units of memory (here, pages), and each level above it describes
  454. * pairs of units from the levels below, hence, "buddies".
  455. * At a high level, all that happens here is marking the table entry
  456. * at the bottom level available, and propagating the changes upward
  457. * as necessary, plus some accounting needed to play nicely with other
  458. * parts of the VM system.
  459. * At each level, we keep a list of pages, which are heads of contiguous
  460. * runs of free pages of length (1 << order), marked with _mapcount
  461. * PAGE_BUDDY_MAPCOUNT_VALUE. A page's order is recorded in the
  462. * page_private(page) field.
  463. * So when we are allocating or freeing one, we can derive the state of the
  464. * other. That is, if we allocate a small block, and both were
  465. * free, the remainder of the region must be split into blocks.
  466. * If a block is freed, and its buddy is also free, then this
  467. * triggers coalescing into a block of larger size.
  468. *
  469. * -- nyc
  470. */
  471. static inline void __free_one_page(struct page *page,
  472. unsigned long pfn,
  473. struct zone *zone, unsigned int order,
  474. int migratetype)
  475. {
  476. unsigned long page_idx;
  477. unsigned long combined_idx;
  478. unsigned long uninitialized_var(buddy_idx);
  479. struct page *buddy;
  480. int max_order = MAX_ORDER;
  481. VM_BUG_ON(!zone_is_initialized(zone));
  482. if (unlikely(PageCompound(page)))
  483. if (unlikely(destroy_compound_page(page, order)))
  484. return;
  485. VM_BUG_ON(migratetype == -1);
  486. if (is_migrate_isolate(migratetype)) {
  487. /*
  488. * We restrict max order of merging to prevent merge
  489. * between freepages on isolate pageblock and normal
  490. * pageblock. Without this, pageblock isolation
  491. * could cause incorrect freepage accounting.
  492. */
  493. max_order = min(MAX_ORDER, pageblock_order + 1);
  494. } else {
  495. __mod_zone_freepage_state(zone, 1 << order, migratetype);
  496. }
  497. page_idx = pfn & ((1 << max_order) - 1);
  498. VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
  499. VM_BUG_ON_PAGE(bad_range(zone, page), page);
  500. while (order < max_order - 1) {
  501. buddy_idx = __find_buddy_index(page_idx, order);
  502. buddy = page + (buddy_idx - page_idx);
  503. if (!page_is_buddy(page, buddy, order))
  504. break;
  505. /*
  506. * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard page,
  507. * merge with it and move up one order.
  508. */
  509. if (page_is_guard(buddy)) {
  510. clear_page_guard_flag(buddy);
  511. set_page_private(buddy, 0);
  512. if (!is_migrate_isolate(migratetype)) {
  513. __mod_zone_freepage_state(zone, 1 << order,
  514. migratetype);
  515. }
  516. } else {
  517. list_del(&buddy->lru);
  518. zone->free_area[order].nr_free--;
  519. rmv_page_order(buddy);
  520. }
  521. combined_idx = buddy_idx & page_idx;
  522. page = page + (combined_idx - page_idx);
  523. page_idx = combined_idx;
  524. order++;
  525. }
  526. set_page_order(page, order);
  527. /*
  528. * If this is not the largest possible page, check if the buddy
  529. * of the next-highest order is free. If it is, it's possible
  530. * that pages are being freed that will coalesce soon. In case
  531. * that is happening, add the free page to the tail of the list
  532. * so it's less likely to be used soon and more likely to be merged
  533. * as a higher-order page.
  534. */
  535. if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
  536. struct page *higher_page, *higher_buddy;
  537. combined_idx = buddy_idx & page_idx;
  538. higher_page = page + (combined_idx - page_idx);
  539. buddy_idx = __find_buddy_index(combined_idx, order + 1);
  540. higher_buddy = higher_page + (buddy_idx - combined_idx);
  541. if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
  542. list_add_tail(&page->lru,
  543. &zone->free_area[order].free_list[migratetype]);
  544. goto out;
  545. }
  546. }
  547. list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
  548. out:
  549. zone->free_area[order].nr_free++;
  550. }
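/*
 * Worked example, illustrative and not part of the original file; it assumes
 * __find_buddy_index() is the usual "flip bit <order>" XOR of the buddy
 * allocator. Freeing an order-2 block at page_idx 12 (binary 1100):
 *
 *	buddy_idx    = 12 ^ (1 << 2) = 8
 *	combined_idx = 8 & 12        = 8
 *
 * If the buddy at index 8 is free and of order 2, the two merge into an
 * order-3 block starting at index 8 and the loop above retries one order up.
 */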
  551. static inline int free_pages_check(struct page *page)
  552. {
  553. const char *bad_reason = NULL;
  554. unsigned long bad_flags = 0;
  555. if (unlikely(page_mapcount(page)))
  556. bad_reason = "nonzero mapcount";
  557. if (unlikely(page->mapping != NULL))
  558. bad_reason = "non-NULL mapping";
  559. if (unlikely(atomic_read(&page->_count) != 0))
  560. bad_reason = "nonzero _count";
  561. if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
  562. bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
  563. bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
  564. }
  565. #ifdef CONFIG_MEMCG
  566. if (unlikely(page->mem_cgroup))
  567. bad_reason = "page still charged to cgroup";
  568. #endif
  569. if (unlikely(bad_reason)) {
  570. bad_page(page, bad_reason, bad_flags);
  571. return 1;
  572. }
  573. page_cpupid_reset_last(page);
  574. if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
  575. page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
  576. return 0;
  577. }
  578. /*
  579. * Frees a number of pages from the PCP lists
  580. * Assumes all pages on list are in same zone, and of same order.
  581. * count is the number of pages to free.
  582. *
  583. * If the zone was previously in an "all pages pinned" state then look to
  584. * see if this freeing clears that state.
  585. *
  586. * And clear the zone's pages_scanned counter, to hold off the "all pages are
  587. * pinned" detection logic.
  588. */
  589. static void free_pcppages_bulk(struct zone *zone, int count,
  590. struct per_cpu_pages *pcp)
  591. {
  592. int migratetype = 0;
  593. int batch_free = 0;
  594. int to_free = count;
  595. unsigned long nr_scanned;
  596. spin_lock(&zone->lock);
  597. nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
  598. if (nr_scanned)
  599. __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
  600. while (to_free) {
  601. struct page *page;
  602. struct list_head *list;
  603. /*
  604. * Remove pages from lists in a round-robin fashion. A
  605. * batch_free count is maintained that is incremented when an
  606. * empty list is encountered. This is so more pages are freed
  607. * off fuller lists instead of spinning excessively around empty
  608. * lists
  609. */
  610. do {
  611. batch_free++;
  612. if (++migratetype == MIGRATE_PCPTYPES)
  613. migratetype = 0;
  614. list = &pcp->lists[migratetype];
  615. } while (list_empty(list));
  616. /* This is the only non-empty list. Free them all. */
  617. if (batch_free == MIGRATE_PCPTYPES)
  618. batch_free = to_free;
  619. do {
  620. int mt; /* migratetype of the to-be-freed page */
  621. page = list_entry(list->prev, struct page, lru);
  622. /* must delete, as __free_one_page() manipulates the free lists */
  623. list_del(&page->lru);
  624. mt = get_freepage_migratetype(page);
  625. if (unlikely(has_isolate_pageblock(zone)))
  626. mt = get_pageblock_migratetype(page);
  627. /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
  628. __free_one_page(page, page_to_pfn(page), zone, 0, mt);
  629. trace_mm_page_pcpu_drain(page, 0, mt);
  630. } while (--to_free && --batch_free && !list_empty(list));
  631. }
  632. spin_unlock(&zone->lock);
  633. }
  634. static void free_one_page(struct zone *zone,
  635. struct page *page, unsigned long pfn,
  636. unsigned int order,
  637. int migratetype)
  638. {
  639. unsigned long nr_scanned;
  640. spin_lock(&zone->lock);
  641. nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
  642. if (nr_scanned)
  643. __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
  644. if (unlikely(has_isolate_pageblock(zone) ||
  645. is_migrate_isolate(migratetype))) {
  646. migratetype = get_pfnblock_migratetype(page, pfn);
  647. }
  648. __free_one_page(page, pfn, zone, order, migratetype);
  649. spin_unlock(&zone->lock);
  650. }
  651. static bool free_pages_prepare(struct page *page, unsigned int order)
  652. {
  653. int i;
  654. int bad = 0;
  655. VM_BUG_ON_PAGE(PageTail(page), page);
  656. VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page);
  657. trace_mm_page_free(page, order);
  658. kmemcheck_free_shadow(page, order);
  659. if (PageAnon(page))
  660. page->mapping = NULL;
  661. for (i = 0; i < (1 << order); i++)
  662. bad += free_pages_check(page + i);
  663. if (bad)
  664. return false;
  665. if (!PageHighMem(page)) {
  666. debug_check_no_locks_freed(page_address(page),
  667. PAGE_SIZE << order);
  668. debug_check_no_obj_freed(page_address(page),
  669. PAGE_SIZE << order);
  670. }
  671. arch_free_page(page, order);
  672. kernel_map_pages(page, 1 << order, 0);
  673. return true;
  674. }
  675. static void __free_pages_ok(struct page *page, unsigned int order)
  676. {
  677. unsigned long flags;
  678. int migratetype;
  679. unsigned long pfn = page_to_pfn(page);
  680. if (!free_pages_prepare(page, order))
  681. return;
  682. migratetype = get_pfnblock_migratetype(page, pfn);
  683. local_irq_save(flags);
  684. __count_vm_events(PGFREE, 1 << order);
  685. set_freepage_migratetype(page, migratetype);
  686. free_one_page(page_zone(page), page, pfn, order, migratetype);
  687. local_irq_restore(flags);
  688. }
  689. void __init __free_pages_bootmem(struct page *page, unsigned int order)
  690. {
  691. unsigned int nr_pages = 1 << order;
  692. struct page *p = page;
  693. unsigned int loop;
  694. prefetchw(p);
  695. for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
  696. prefetchw(p + 1);
  697. __ClearPageReserved(p);
  698. set_page_count(p, 0);
  699. }
  700. __ClearPageReserved(p);
  701. set_page_count(p, 0);
  702. page_zone(page)->managed_pages += nr_pages;
  703. set_page_refcounted(page);
  704. __free_pages(page, order);
  705. }
  706. #ifdef CONFIG_CMA
  707. /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
  708. void __init init_cma_reserved_pageblock(struct page *page)
  709. {
  710. unsigned i = pageblock_nr_pages;
  711. struct page *p = page;
  712. do {
  713. __ClearPageReserved(p);
  714. set_page_count(p, 0);
  715. } while (++p, --i);
  716. set_pageblock_migratetype(page, MIGRATE_CMA);
  717. if (pageblock_order >= MAX_ORDER) {
  718. i = pageblock_nr_pages;
  719. p = page;
  720. do {
  721. set_page_refcounted(p);
  722. __free_pages(p, MAX_ORDER - 1);
  723. p += MAX_ORDER_NR_PAGES;
  724. } while (i -= MAX_ORDER_NR_PAGES);
  725. } else {
  726. set_page_refcounted(page);
  727. __free_pages(page, pageblock_order);
  728. }
  729. adjust_managed_page_count(page, pageblock_nr_pages);
  730. }
  731. #endif
  732. /*
  733. * The order of subdivision here is critical for the IO subsystem.
  734. * Please do not alter this order without good reasons and regression
  735. * testing. Specifically, as large blocks of memory are subdivided,
  736. * the order in which smaller blocks are delivered depends on the order
  737. * they're subdivided in this function. This is the primary factor
  738. * influencing the order in which pages are delivered to the IO
  739. * subsystem according to empirical testing, and this is also justified
  740. * by considering the behavior of a buddy system containing a single
  741. * large block of memory acted on by a series of small allocations.
  742. * This behavior is a critical factor in sglist merging's success.
  743. *
  744. * -- nyc
  745. */
  746. static inline void expand(struct zone *zone, struct page *page,
  747. int low, int high, struct free_area *area,
  748. int migratetype)
  749. {
  750. unsigned long size = 1 << high;
  751. while (high > low) {
  752. area--;
  753. high--;
  754. size >>= 1;
  755. VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
  756. #ifdef CONFIG_DEBUG_PAGEALLOC
  757. if (high < debug_guardpage_minorder()) {
  758. /*
  759. * Mark as guard pages (or page) so they can be merged back
  760. * into the allocator when the buddy is freed.
  761. * Corresponding page table entries will not be touched;
  762. * the pages will stay not-present in the virtual address space.
  763. */
  764. INIT_LIST_HEAD(&page[size].lru);
  765. set_page_guard_flag(&page[size]);
  766. set_page_private(&page[size], high);
  767. /* Guard pages are not available for any usage */
  768. __mod_zone_freepage_state(zone, -(1 << high),
  769. migratetype);
  770. continue;
  771. }
  772. #endif
  773. list_add(&page[size].lru, &area->free_list[migratetype]);
  774. area->nr_free++;
  775. set_page_order(&page[size], high);
  776. }
  777. }
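/*
 * Worked example, illustrative and not part of the original file: serving an
 * order-0 request (low = 0) from an order-3 free block (high = 3) runs the
 * loop above three times and, relative to the start of the block, leaves
 *
 *	pages 4-7 on the order-2 free list,
 *	pages 2-3 on the order-1 free list,
 *	page  1   on the order-0 free list,
 *
 * while page 0 is returned to the caller.
 */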
  778. /*
  779. * This page is about to be returned from the page allocator
  780. */
  781. static inline int check_new_page(struct page *page)
  782. {
  783. const char *bad_reason = NULL;
  784. unsigned long bad_flags = 0;
  785. if (unlikely(page_mapcount(page)))
  786. bad_reason = "nonzero mapcount";
  787. if (unlikely(page->mapping != NULL))
  788. bad_reason = "non-NULL mapping";
  789. if (unlikely(atomic_read(&page->_count) != 0))
  790. bad_reason = "nonzero _count";
  791. if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
  792. bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
  793. bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
  794. }
  795. #ifdef CONFIG_MEMCG
  796. if (unlikely(page->mem_cgroup))
  797. bad_reason = "page still charged to cgroup";
  798. #endif
  799. if (unlikely(bad_reason)) {
  800. bad_page(page, bad_reason, bad_flags);
  801. return 1;
  802. }
  803. return 0;
  804. }
  805. static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
  806. {
  807. int i;
  808. for (i = 0; i < (1 << order); i++) {
  809. struct page *p = page + i;
  810. if (unlikely(check_new_page(p)))
  811. return 1;
  812. }
  813. set_page_private(page, 0);
  814. set_page_refcounted(page);
  815. arch_alloc_page(page, order);
  816. kernel_map_pages(page, 1 << order, 1);
  817. if (gfp_flags & __GFP_ZERO)
  818. prep_zero_page(page, order, gfp_flags);
  819. if (order && (gfp_flags & __GFP_COMP))
  820. prep_compound_page(page, order);
  821. return 0;
  822. }
  823. /*
  824. * Go through the free lists for the given migratetype and remove
  825. * the smallest available page from the freelists
  826. */
  827. static inline
  828. struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  829. int migratetype)
  830. {
  831. unsigned int current_order;
  832. struct free_area *area;
  833. struct page *page;
  834. /* Find a page of the appropriate size in the preferred list */
  835. for (current_order = order; current_order < MAX_ORDER; ++current_order) {
  836. area = &(zone->free_area[current_order]);
  837. if (list_empty(&area->free_list[migratetype]))
  838. continue;
  839. page = list_entry(area->free_list[migratetype].next,
  840. struct page, lru);
  841. list_del(&page->lru);
  842. rmv_page_order(page);
  843. area->nr_free--;
  844. expand(zone, page, order, current_order, area, migratetype);
  845. set_freepage_migratetype(page, migratetype);
  846. return page;
  847. }
  848. return NULL;
  849. }
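/*
 * Illustrative example, not part of the original file: an order-1 request
 * against a migratetype whose only free block is of order 3 takes that block
 * off the order-3 list; expand() above then returns the unused order-2 and
 * order-1 remainders to their free lists before the order-1 page is handed
 * back to the caller.
 */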
  850. /*
851. * This array describes the order in which free lists are fallen back
852. * to when the free lists for the desired migratetype are depleted
  853. */
  854. static int fallbacks[MIGRATE_TYPES][4] = {
  855. [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
  856. [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
  857. #ifdef CONFIG_CMA
  858. [MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
  859. [MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
  860. #else
  861. [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
  862. #endif
  863. [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
  864. #ifdef CONFIG_MEMORY_ISOLATION
  865. [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
  866. #endif
  867. };
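/*
 * Example of how the table above is consulted (see __rmqueue_fallback()
 * below): when the MIGRATE_UNMOVABLE free lists are empty, the allocator
 * tries MIGRATE_RECLAIMABLE blocks first, then MIGRATE_MOVABLE, and
 * stops when it reaches MIGRATE_RESERVE, which is handled separately in
 * __rmqueue(). With CONFIG_CMA, a MIGRATE_MOVABLE request may borrow
 * from MIGRATE_CMA before the other types, but never the reverse.
 */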
  868. /*
  869. * Move the free pages in a range to the free lists of the requested type.
870. * Note that start_page and end_page are not aligned on a pageblock
  871. * boundary. If alignment is required, use move_freepages_block()
  872. */
  873. int move_freepages(struct zone *zone,
  874. struct page *start_page, struct page *end_page,
  875. int migratetype)
  876. {
  877. struct page *page;
  878. unsigned long order;
  879. int pages_moved = 0;
  880. #ifndef CONFIG_HOLES_IN_ZONE
  881. /*
  882. * page_zone is not safe to call in this context when
  883. * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
  884. * anyway as we check zone boundaries in move_freepages_block().
  885. * Remove at a later date when no bug reports exist related to
  886. * grouping pages by mobility
  887. */
  888. VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
  889. #endif
  890. for (page = start_page; page <= end_page;) {
  891. /* Make sure we are not inadvertently changing nodes */
  892. VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
  893. if (!pfn_valid_within(page_to_pfn(page))) {
  894. page++;
  895. continue;
  896. }
  897. if (!PageBuddy(page)) {
  898. page++;
  899. continue;
  900. }
  901. order = page_order(page);
  902. list_move(&page->lru,
  903. &zone->free_area[order].free_list[migratetype]);
  904. set_freepage_migratetype(page, migratetype);
  905. page += 1 << order;
  906. pages_moved += 1 << order;
  907. }
  908. return pages_moved;
  909. }
  910. int move_freepages_block(struct zone *zone, struct page *page,
  911. int migratetype)
  912. {
  913. unsigned long start_pfn, end_pfn;
  914. struct page *start_page, *end_page;
  915. start_pfn = page_to_pfn(page);
  916. start_pfn = start_pfn & ~(pageblock_nr_pages-1);
  917. start_page = pfn_to_page(start_pfn);
  918. end_page = start_page + pageblock_nr_pages - 1;
  919. end_pfn = start_pfn + pageblock_nr_pages - 1;
  920. /* Do not cross zone boundaries */
  921. if (!zone_spans_pfn(zone, start_pfn))
  922. start_page = page;
  923. if (!zone_spans_pfn(zone, end_pfn))
  924. return 0;
  925. return move_freepages(zone, start_page, end_page, migratetype);
  926. }
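/*
 * Worked example of the alignment above (assuming pageblock_order == 9,
 * so pageblock_nr_pages == 512, a common configuration): a page at
 * pfn 74565 yields start_pfn = 74565 & ~511 = 74240 and
 * end_pfn = 74240 + 511 = 74751. If start_pfn falls outside the zone,
 * the move starts at the passed-in page instead; if end_pfn does, the
 * whole block is skipped and 0 is returned.
 */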
  927. static void change_pageblock_range(struct page *pageblock_page,
  928. int start_order, int migratetype)
  929. {
  930. int nr_pageblocks = 1 << (start_order - pageblock_order);
  931. while (nr_pageblocks--) {
  932. set_pageblock_migratetype(pageblock_page, migratetype);
  933. pageblock_page += pageblock_nr_pages;
  934. }
  935. }
  936. /*
  937. * If breaking a large block of pages, move all free pages to the preferred
  938. * allocation list. If falling back for a reclaimable kernel allocation, be
  939. * more aggressive about taking ownership of free pages.
  940. *
  941. * On the other hand, never change migration type of MIGRATE_CMA pageblocks
  942. * nor move CMA pages to different free lists. We don't want unmovable pages
  943. * to be allocated from MIGRATE_CMA areas.
  944. *
  945. * Returns the new migratetype of the pageblock (or the same old migratetype
  946. * if it was unchanged).
  947. */
  948. static int try_to_steal_freepages(struct zone *zone, struct page *page,
  949. int start_type, int fallback_type)
  950. {
  951. int current_order = page_order(page);
  952. /*
  953. * When borrowing from MIGRATE_CMA, we need to release the excess
  954. * buddy pages to CMA itself. We also ensure the freepage_migratetype
  955. * is set to CMA so it is returned to the correct freelist in case
  956. * the page ends up being not actually allocated from the pcp lists.
  957. */
  958. if (is_migrate_cma(fallback_type))
  959. return fallback_type;
  960. /* Take ownership for orders >= pageblock_order */
  961. if (current_order >= pageblock_order) {
  962. change_pageblock_range(page, current_order, start_type);
  963. return start_type;
  964. }
  965. if (current_order >= pageblock_order / 2 ||
  966. start_type == MIGRATE_RECLAIMABLE ||
  967. page_group_by_mobility_disabled) {
  968. int pages;
  969. pages = move_freepages_block(zone, page, start_type);
  970. /* Claim the whole block if over half of it is free */
  971. if (pages >= (1 << (pageblock_order-1)) ||
  972. page_group_by_mobility_disabled) {
  973. set_pageblock_migratetype(page, start_type);
  974. return start_type;
  975. }
  976. }
  977. return fallback_type;
  978. }
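/*
 * Worked example of the "over half" rule above (assuming
 * pageblock_order == 9): move_freepages_block() must report at least
 * 1 << 8 == 256 moved pages for the pageblock to be re-marked with
 * start_type. If fewer pages were free, only those pages change free
 * list and the block keeps its old migratetype, so a later fallback may
 * steal from it again rather than claim it outright.
 */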
  979. /* Remove an element from the buddy allocator from the fallback list */
  980. static inline struct page *
  981. __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
  982. {
  983. struct free_area *area;
  984. unsigned int current_order;
  985. struct page *page;
  986. int migratetype, new_type, i;
  987. /* Find the largest possible block of pages in the other list */
  988. for (current_order = MAX_ORDER-1;
  989. current_order >= order && current_order <= MAX_ORDER-1;
  990. --current_order) {
  991. for (i = 0;; i++) {
  992. migratetype = fallbacks[start_migratetype][i];
  993. /* MIGRATE_RESERVE handled later if necessary */
  994. if (migratetype == MIGRATE_RESERVE)
  995. break;
  996. area = &(zone->free_area[current_order]);
  997. if (list_empty(&area->free_list[migratetype]))
  998. continue;
  999. page = list_entry(area->free_list[migratetype].next,
  1000. struct page, lru);
  1001. area->nr_free--;
  1002. new_type = try_to_steal_freepages(zone, page,
  1003. start_migratetype,
  1004. migratetype);
  1005. /* Remove the page from the freelists */
  1006. list_del(&page->lru);
  1007. rmv_page_order(page);
  1008. expand(zone, page, order, current_order, area,
  1009. new_type);
  1010. /* The freepage_migratetype may differ from pageblock's
  1011. * migratetype depending on the decisions in
  1012. * try_to_steal_freepages. This is OK as long as it does
  1013. * not differ for MIGRATE_CMA type.
  1014. */
  1015. set_freepage_migratetype(page, new_type);
  1016. trace_mm_page_alloc_extfrag(page, order, current_order,
  1017. start_migratetype, migratetype, new_type);
  1018. return page;
  1019. }
  1020. }
  1021. return NULL;
  1022. }
  1023. /*
  1024. * Do the hard work of removing an element from the buddy allocator.
  1025. * Call me with the zone->lock already held.
  1026. */
  1027. static struct page *__rmqueue(struct zone *zone, unsigned int order,
  1028. int migratetype)
  1029. {
  1030. struct page *page;
  1031. retry_reserve:
  1032. page = __rmqueue_smallest(zone, order, migratetype);
  1033. if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
  1034. page = __rmqueue_fallback(zone, order, migratetype);
  1035. /*
  1036. * Use MIGRATE_RESERVE rather than fail an allocation. goto
  1037. * is used because __rmqueue_smallest is an inline function
  1038. * and we want just one call site
  1039. */
  1040. if (!page) {
  1041. migratetype = MIGRATE_RESERVE;
  1042. goto retry_reserve;
  1043. }
  1044. }
  1045. trace_mm_page_alloc_zone_locked(page, order, migratetype);
  1046. return page;
  1047. }
  1048. /*
  1049. * Obtain a specified number of elements from the buddy allocator, all under
  1050. * a single hold of the lock, for efficiency. Add them to the supplied list.
  1051. * Returns the number of new pages which were placed at *list.
  1052. */
  1053. static int rmqueue_bulk(struct zone *zone, unsigned int order,
  1054. unsigned long count, struct list_head *list,
  1055. int migratetype, bool cold)
  1056. {
  1057. int i;
  1058. spin_lock(&zone->lock);
  1059. for (i = 0; i < count; ++i) {
  1060. struct page *page = __rmqueue(zone, order, migratetype);
  1061. if (unlikely(page == NULL))
  1062. break;
  1063. /*
  1064. * Split buddy pages returned by expand() are received here
1065. * in physical page order. The page is added to the caller's
1066. * list and the list head then moves forward. From the caller's
1067. * perspective, the linked list is ordered by page number in
  1068. * some conditions. This is useful for IO devices that can
  1069. * merge IO requests if the physical pages are ordered
  1070. * properly.
  1071. */
  1072. if (likely(!cold))
  1073. list_add(&page->lru, list);
  1074. else
  1075. list_add_tail(&page->lru, list);
  1076. list = &page->lru;
  1077. if (is_migrate_cma(get_freepage_migratetype(page)))
  1078. __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
  1079. -(1 << order));
  1080. }
  1081. __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
  1082. spin_unlock(&zone->lock);
  1083. return i;
  1084. }
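/*
 * Example of the bookkeeping above (hypothetical numbers): refilling a
 * per-cpu list with count == 31 order-0 pages decrements NR_FREE_PAGES
 * by 31 in a single adjustment outside the loop, while each page pulled
 * from a CMA block additionally had NR_FREE_CMA_PAGES decremented as it
 * was removed. The caller (buffered_rmqueue() below) adds the returned
 * count to pcp->count.
 */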
  1085. #ifdef CONFIG_NUMA
  1086. /*
  1087. * Called from the vmstat counter updater to drain pagesets of this
  1088. * currently executing processor on remote nodes after they have
  1089. * expired.
  1090. *
  1091. * Note that this function must be called with the thread pinned to
  1092. * a single processor.
  1093. */
  1094. void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
  1095. {
  1096. unsigned long flags;
  1097. int to_drain, batch;
  1098. local_irq_save(flags);
  1099. batch = ACCESS_ONCE(pcp->batch);
  1100. to_drain = min(pcp->count, batch);
  1101. if (to_drain > 0) {
  1102. free_pcppages_bulk(zone, to_drain, pcp);
  1103. pcp->count -= to_drain;
  1104. }
  1105. local_irq_restore(flags);
  1106. }
  1107. #endif
  1108. /*
  1109. * Drain pcplists of the indicated processor and zone.
  1110. *
  1111. * The processor must either be the current processor and the
  1112. * thread pinned to the current processor or a processor that
  1113. * is not online.
  1114. */
  1115. static void drain_pages_zone(unsigned int cpu, struct zone *zone)
  1116. {
  1117. unsigned long flags;
  1118. struct per_cpu_pageset *pset;
  1119. struct per_cpu_pages *pcp;
  1120. local_irq_save(flags);
  1121. pset = per_cpu_ptr(zone->pageset, cpu);
  1122. pcp = &pset->pcp;
  1123. if (pcp->count) {
  1124. free_pcppages_bulk(zone, pcp->count, pcp);
  1125. pcp->count = 0;
  1126. }
  1127. local_irq_restore(flags);
  1128. }
  1129. /*
  1130. * Drain pcplists of all zones on the indicated processor.
  1131. *
  1132. * The processor must either be the current processor and the
  1133. * thread pinned to the current processor or a processor that
  1134. * is not online.
  1135. */
  1136. static void drain_pages(unsigned int cpu)
  1137. {
  1138. struct zone *zone;
  1139. for_each_populated_zone(zone) {
  1140. drain_pages_zone(cpu, zone);
  1141. }
  1142. }
  1143. /*
  1144. * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  1145. *
  1146. * The CPU has to be pinned. When zone parameter is non-NULL, spill just
  1147. * the single zone's pages.
  1148. */
  1149. void drain_local_pages(struct zone *zone)
  1150. {
  1151. int cpu = smp_processor_id();
  1152. if (zone)
  1153. drain_pages_zone(cpu, zone);
  1154. else
  1155. drain_pages(cpu);
  1156. }
  1157. /*
  1158. * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
  1159. *
  1160. * When zone parameter is non-NULL, spill just the single zone's pages.
  1161. *
  1162. * Note that this code is protected against sending an IPI to an offline
  1163. * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
  1164. * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
  1165. * nothing keeps CPUs from showing up after we populated the cpumask and
  1166. * before the call to on_each_cpu_mask().
  1167. */
  1168. void drain_all_pages(struct zone *zone)
  1169. {
  1170. int cpu;
  1171. /*
1172. * Allocate in the BSS so we won't require allocation in
  1173. * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
  1174. */
  1175. static cpumask_t cpus_with_pcps;
  1176. /*
  1177. * We don't care about racing with CPU hotplug event
  1178. * as offline notification will cause the notified
  1179. * cpu to drain that CPU pcps and on_each_cpu_mask
  1180. * disables preemption as part of its processing
  1181. */
  1182. for_each_online_cpu(cpu) {
  1183. struct per_cpu_pageset *pcp;
  1184. struct zone *z;
  1185. bool has_pcps = false;
  1186. if (zone) {
  1187. pcp = per_cpu_ptr(zone->pageset, cpu);
  1188. if (pcp->pcp.count)
  1189. has_pcps = true;
  1190. } else {
  1191. for_each_populated_zone(z) {
  1192. pcp = per_cpu_ptr(z->pageset, cpu);
  1193. if (pcp->pcp.count) {
  1194. has_pcps = true;
  1195. break;
  1196. }
  1197. }
  1198. }
  1199. if (has_pcps)
  1200. cpumask_set_cpu(cpu, &cpus_with_pcps);
  1201. else
  1202. cpumask_clear_cpu(cpu, &cpus_with_pcps);
  1203. }
  1204. on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
  1205. zone, 1);
  1206. }
  1207. #ifdef CONFIG_HIBERNATION
  1208. void mark_free_pages(struct zone *zone)
  1209. {
  1210. unsigned long pfn, max_zone_pfn;
  1211. unsigned long flags;
  1212. unsigned int order, t;
  1213. struct list_head *curr;
  1214. if (zone_is_empty(zone))
  1215. return;
  1216. spin_lock_irqsave(&zone->lock, flags);
  1217. max_zone_pfn = zone_end_pfn(zone);
  1218. for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
  1219. if (pfn_valid(pfn)) {
  1220. struct page *page = pfn_to_page(pfn);
  1221. if (!swsusp_page_is_forbidden(page))
  1222. swsusp_unset_page_free(page);
  1223. }
  1224. for_each_migratetype_order(order, t) {
  1225. list_for_each(curr, &zone->free_area[order].free_list[t]) {
  1226. unsigned long i;
  1227. pfn = page_to_pfn(list_entry(curr, struct page, lru));
  1228. for (i = 0; i < (1UL << order); i++)
  1229. swsusp_set_page_free(pfn_to_page(pfn + i));
  1230. }
  1231. }
  1232. spin_unlock_irqrestore(&zone->lock, flags);
  1233. }
1234. #endif /* CONFIG_HIBERNATION */
  1235. /*
  1236. * Free a 0-order page
  1237. * cold == true ? free a cold page : free a hot page
  1238. */
  1239. void free_hot_cold_page(struct page *page, bool cold)
  1240. {
  1241. struct zone *zone = page_zone(page);
  1242. struct per_cpu_pages *pcp;
  1243. unsigned long flags;
  1244. unsigned long pfn = page_to_pfn(page);
  1245. int migratetype;
  1246. if (!free_pages_prepare(page, 0))
  1247. return;
  1248. migratetype = get_pfnblock_migratetype(page, pfn);
  1249. set_freepage_migratetype(page, migratetype);
  1250. local_irq_save(flags);
  1251. __count_vm_event(PGFREE);
  1252. /*
  1253. * We only track unmovable, reclaimable and movable on pcp lists.
  1254. * Free ISOLATE pages back to the allocator because they are being
  1255. * offlined but treat RESERVE as movable pages so we can get those
  1256. * areas back if necessary. Otherwise, we may have to free
  1257. * excessively into the page allocator
  1258. */
  1259. if (migratetype >= MIGRATE_PCPTYPES) {
  1260. if (unlikely(is_migrate_isolate(migratetype))) {
  1261. free_one_page(zone, page, pfn, 0, migratetype);
  1262. goto out;
  1263. }
  1264. migratetype = MIGRATE_MOVABLE;
  1265. }
  1266. pcp = &this_cpu_ptr(zone->pageset)->pcp;
  1267. if (!cold)
  1268. list_add(&page->lru, &pcp->lists[migratetype]);
  1269. else
  1270. list_add_tail(&page->lru, &pcp->lists[migratetype]);
  1271. pcp->count++;
  1272. if (pcp->count >= pcp->high) {
  1273. unsigned long batch = ACCESS_ONCE(pcp->batch);
  1274. free_pcppages_bulk(zone, batch, pcp);
  1275. pcp->count -= batch;
  1276. }
  1277. out:
  1278. local_irq_restore(flags);
  1279. }
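/*
 * Worked example of the high/batch interaction above (assumed values,
 * roughly what a multi-GB zone ends up with): with pcp->high == 186 and
 * pcp->batch == 31, the free that pushes pcp->count to 186 triggers
 * free_pcppages_bulk() for 31 pages, leaving 155 pages cached on the
 * per-cpu list for future order-0 allocations.
 */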
  1280. /*
  1281. * Free a list of 0-order pages
  1282. */
  1283. void free_hot_cold_page_list(struct list_head *list, bool cold)
  1284. {
  1285. struct page *page, *next;
  1286. list_for_each_entry_safe(page, next, list, lru) {
  1287. trace_mm_page_free_batched(page, cold);
  1288. free_hot_cold_page(page, cold);
  1289. }
  1290. }
  1291. /*
  1292. * split_page takes a non-compound higher-order page, and splits it into
1293. * n (1<<order) sub-pages: page[0] .. page[n-1]
  1294. * Each sub-page must be freed individually.
  1295. *
  1296. * Note: this is probably too low level an operation for use in drivers.
  1297. * Please consult with lkml before using this in your driver.
  1298. */
  1299. void split_page(struct page *page, unsigned int order)
  1300. {
  1301. int i;
  1302. VM_BUG_ON_PAGE(PageCompound(page), page);
  1303. VM_BUG_ON_PAGE(!page_count(page), page);
  1304. #ifdef CONFIG_KMEMCHECK
  1305. /*
  1306. * Split shadow pages too, because free(page[0]) would
  1307. * otherwise free the whole shadow.
  1308. */
  1309. if (kmemcheck_page_is_tracked(page))
  1310. split_page(virt_to_page(page[0].shadow), order);
  1311. #endif
  1312. for (i = 1; i < (1 << order); i++)
  1313. set_page_refcounted(page + i);
  1314. }
  1315. EXPORT_SYMBOL_GPL(split_page);
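/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * holding a non-compound order-2 allocation can split it so the four
 * base pages are refcounted and freed independently:
 *
 *   struct page *page = alloc_pages(GFP_KERNEL, 2);
 *   if (page) {
 *           split_page(page, 2);
 *           __free_page(page + 3);  // each sub-page now stands alone
 *   }
 *
 * Heed the note above and check with lkml before doing this in a real
 * driver.
 */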
  1316. int __isolate_free_page(struct page *page, unsigned int order)
  1317. {
  1318. unsigned long watermark;
  1319. struct zone *zone;
  1320. int mt;
  1321. BUG_ON(!PageBuddy(page));
  1322. zone = page_zone(page);
  1323. mt = get_pageblock_migratetype(page);
  1324. if (!is_migrate_isolate(mt)) {
  1325. /* Obey watermarks as if the page was being allocated */
  1326. watermark = low_wmark_pages(zone) + (1 << order);
  1327. if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
  1328. return 0;
  1329. __mod_zone_freepage_state(zone, -(1UL << order), mt);
  1330. }
  1331. /* Remove page from free list */
  1332. list_del(&page->lru);
  1333. zone->free_area[order].nr_free--;
  1334. rmv_page_order(page);
  1335. /* Set the pageblock if the isolated page is at least a pageblock */
  1336. if (order >= pageblock_order - 1) {
  1337. struct page *endpage = page + (1 << order) - 1;
  1338. for (; page < endpage; page += pageblock_nr_pages) {
  1339. int mt = get_pageblock_migratetype(page);
  1340. if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
  1341. set_pageblock_migratetype(page,
  1342. MIGRATE_MOVABLE);
  1343. }
  1344. }
  1345. return 1UL << order;
  1346. }
  1347. /*
  1348. * Similar to split_page except the page is already free. As this is only
  1349. * being used for migration, the migratetype of the block also changes.
  1350. * As this is called with interrupts disabled, the caller is responsible
1351. * for calling arch_alloc_page() and kernel_map_pages() after interrupts
  1352. * are enabled.
  1353. *
  1354. * Note: this is probably too low level an operation for use in drivers.
  1355. * Please consult with lkml before using this in your driver.
  1356. */
  1357. int split_free_page(struct page *page)
  1358. {
  1359. unsigned int order;
  1360. int nr_pages;
  1361. order = page_order(page);
  1362. nr_pages = __isolate_free_page(page, order);
  1363. if (!nr_pages)
  1364. return 0;
  1365. /* Split into individual pages */
  1366. set_page_refcounted(page);
  1367. split_page(page, order);
  1368. return nr_pages;
  1369. }
  1370. /*
  1371. * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
  1372. * we cheat by calling it from here, in the order > 0 path. Saves a branch
  1373. * or two.
  1374. */
  1375. static inline
  1376. struct page *buffered_rmqueue(struct zone *preferred_zone,
  1377. struct zone *zone, unsigned int order,
  1378. gfp_t gfp_flags, int migratetype)
  1379. {
  1380. unsigned long flags;
  1381. struct page *page;
  1382. bool cold = ((gfp_flags & __GFP_COLD) != 0);
  1383. again:
  1384. if (likely(order == 0)) {
  1385. struct per_cpu_pages *pcp;
  1386. struct list_head *list;
  1387. local_irq_save(flags);
  1388. pcp = &this_cpu_ptr(zone->pageset)->pcp;
  1389. list = &pcp->lists[migratetype];
  1390. if (list_empty(list)) {
  1391. pcp->count += rmqueue_bulk(zone, 0,
  1392. pcp->batch, list,
  1393. migratetype, cold);
  1394. if (unlikely(list_empty(list)))
  1395. goto failed;
  1396. }
  1397. if (cold)
  1398. page = list_entry(list->prev, struct page, lru);
  1399. else
  1400. page = list_entry(list->next, struct page, lru);
  1401. list_del(&page->lru);
  1402. pcp->count--;
  1403. } else {
  1404. if (unlikely(gfp_flags & __GFP_NOFAIL)) {
  1405. /*
  1406. * __GFP_NOFAIL is not to be used in new code.
  1407. *
  1408. * All __GFP_NOFAIL callers should be fixed so that they
  1409. * properly detect and handle allocation failures.
  1410. *
  1411. * We most definitely don't want callers attempting to
  1412. * allocate greater than order-1 page units with
  1413. * __GFP_NOFAIL.
  1414. */
  1415. WARN_ON_ONCE(order > 1);
  1416. }
  1417. spin_lock_irqsave(&zone->lock, flags);
  1418. page = __rmqueue(zone, order, migratetype);
  1419. spin_unlock(&zone->lock);
  1420. if (!page)
  1421. goto failed;
  1422. __mod_zone_freepage_state(zone, -(1 << order),
  1423. get_freepage_migratetype(page));
  1424. }
  1425. __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
  1426. if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
  1427. !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
  1428. set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
  1429. __count_zone_vm_events(PGALLOC, zone, 1 << order);
  1430. zone_statistics(preferred_zone, zone, gfp_flags);
  1431. local_irq_restore(flags);
  1432. VM_BUG_ON_PAGE(bad_range(zone, page), page);
  1433. if (prep_new_page(page, order, gfp_flags))
  1434. goto again;
  1435. return page;
  1436. failed:
  1437. local_irq_restore(flags);
  1438. return NULL;
  1439. }
  1440. #ifdef CONFIG_FAIL_PAGE_ALLOC
  1441. static struct {
  1442. struct fault_attr attr;
  1443. u32 ignore_gfp_highmem;
  1444. u32 ignore_gfp_wait;
  1445. u32 min_order;
  1446. } fail_page_alloc = {
  1447. .attr = FAULT_ATTR_INITIALIZER,
  1448. .ignore_gfp_wait = 1,
  1449. .ignore_gfp_highmem = 1,
  1450. .min_order = 1,
  1451. };
  1452. static int __init setup_fail_page_alloc(char *str)
  1453. {
  1454. return setup_fault_attr(&fail_page_alloc.attr, str);
  1455. }
  1456. __setup("fail_page_alloc=", setup_fail_page_alloc);
  1457. static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  1458. {
  1459. if (order < fail_page_alloc.min_order)
  1460. return false;
  1461. if (gfp_mask & __GFP_NOFAIL)
  1462. return false;
  1463. if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
  1464. return false;
  1465. if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
  1466. return false;
  1467. return should_fail(&fail_page_alloc.attr, 1 << order);
  1468. }
  1469. #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
  1470. static int __init fail_page_alloc_debugfs(void)
  1471. {
  1472. umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
  1473. struct dentry *dir;
  1474. dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
  1475. &fail_page_alloc.attr);
  1476. if (IS_ERR(dir))
  1477. return PTR_ERR(dir);
  1478. if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
  1479. &fail_page_alloc.ignore_gfp_wait))
  1480. goto fail;
  1481. if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
  1482. &fail_page_alloc.ignore_gfp_highmem))
  1483. goto fail;
  1484. if (!debugfs_create_u32("min-order", mode, dir,
  1485. &fail_page_alloc.min_order))
  1486. goto fail;
  1487. return 0;
  1488. fail:
  1489. debugfs_remove_recursive(dir);
  1490. return -ENOMEM;
  1491. }
  1492. late_initcall(fail_page_alloc_debugfs);
  1493. #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
  1494. #else /* CONFIG_FAIL_PAGE_ALLOC */
  1495. static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  1496. {
  1497. return false;
  1498. }
  1499. #endif /* CONFIG_FAIL_PAGE_ALLOC */
  1500. /*
  1501. * Return true if free pages are above 'mark'. This takes into account the order
  1502. * of the allocation.
  1503. */
  1504. static bool __zone_watermark_ok(struct zone *z, unsigned int order,
  1505. unsigned long mark, int classzone_idx, int alloc_flags,
  1506. long free_pages)
  1507. {
  1508. /* free_pages may go negative - that's OK */
  1509. long min = mark;
  1510. int o;
  1511. long free_cma = 0;
  1512. free_pages -= (1 << order) - 1;
  1513. if (alloc_flags & ALLOC_HIGH)
  1514. min -= min / 2;
  1515. if (alloc_flags & ALLOC_HARDER)
  1516. min -= min / 4;
  1517. #ifdef CONFIG_CMA
  1518. /* If allocation can't use CMA areas don't use free CMA pages */
  1519. if (!(alloc_flags & ALLOC_CMA))
  1520. free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
  1521. #endif
  1522. if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
  1523. return false;
  1524. for (o = 0; o < order; o++) {
  1525. /* At the next order, this order's pages become unavailable */
  1526. free_pages -= z->free_area[o].nr_free << o;
  1527. /* Require fewer higher order pages to be free */
  1528. min >>= 1;
  1529. if (free_pages <= min)
  1530. return false;
  1531. }
  1532. return true;
  1533. }
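/*
 * Worked example of the check above (made-up but self-consistent
 * numbers): an order-2 request against min == 4096 with 6000 free pages
 * and no lowmem reserve first passes since 5997 > 4096. The loop then
 * drops order-0 pages (say 3000 of them) and halves min to 2048:
 * 2997 > 2048 still passes; dropping 800 order-1 blocks (1600 pages) and
 * halving min to 1024 leaves 1397 > 1024, so the zone is judged able to
 * satisfy the order-2 allocation.
 */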
  1534. bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
  1535. int classzone_idx, int alloc_flags)
  1536. {
  1537. return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
  1538. zone_page_state(z, NR_FREE_PAGES));
  1539. }
  1540. bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
  1541. unsigned long mark, int classzone_idx, int alloc_flags)
  1542. {
  1543. long free_pages = zone_page_state(z, NR_FREE_PAGES);
  1544. if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
  1545. free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
  1546. return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
  1547. free_pages);
  1548. }
  1549. #ifdef CONFIG_NUMA
  1550. /*
  1551. * zlc_setup - Setup for "zonelist cache". Uses cached zone data to
  1552. * skip over zones that are not allowed by the cpuset, or that have
  1553. * been recently (in last second) found to be nearly full. See further
  1554. * comments in mmzone.h. Reduces cache footprint of zonelist scans
  1555. * that have to skip over a lot of full or unallowed zones.
  1556. *
  1557. * If the zonelist cache is present in the passed zonelist, then
  1558. * returns a pointer to the allowed node mask (either the current
  1559. * tasks mems_allowed, or node_states[N_MEMORY].)
  1560. *
  1561. * If the zonelist cache is not available for this zonelist, does
  1562. * nothing and returns NULL.
  1563. *
  1564. * If the fullzones BITMAP in the zonelist cache is stale (more than
  1565. * a second since last zap'd) then we zap it out (clear its bits.)
  1566. *
  1567. * We hold off even calling zlc_setup, until after we've checked the
  1568. * first zone in the zonelist, on the theory that most allocations will
  1569. * be satisfied from that first zone, so best to examine that zone as
  1570. * quickly as we can.
  1571. */
  1572. static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
  1573. {
  1574. struct zonelist_cache *zlc; /* cached zonelist speedup info */
  1575. nodemask_t *allowednodes; /* zonelist_cache approximation */
  1576. zlc = zonelist->zlcache_ptr;
  1577. if (!zlc)
  1578. return NULL;
  1579. if (time_after(jiffies, zlc->last_full_zap + HZ)) {
  1580. bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
  1581. zlc->last_full_zap = jiffies;
  1582. }
  1583. allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
  1584. &cpuset_current_mems_allowed :
  1585. &node_states[N_MEMORY];
  1586. return allowednodes;
  1587. }
  1588. /*
  1589. * Given 'z' scanning a zonelist, run a couple of quick checks to see
  1590. * if it is worth looking at further for free memory:
  1591. * 1) Check that the zone isn't thought to be full (doesn't have its
  1592. * bit set in the zonelist_cache fullzones BITMAP).
1593. * 2) Check that the zone's node (obtained from the zonelist_cache
  1594. * z_to_n[] mapping) is allowed in the passed in allowednodes mask.
  1595. * Return true (non-zero) if zone is worth looking at further, or
  1596. * else return false (zero) if it is not.
  1597. *
  1598. * This check -ignores- the distinction between various watermarks,
  1599. * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ... If a zone is
  1600. * found to be full for any variation of these watermarks, it will
  1601. * be considered full for up to one second by all requests, unless
  1602. * we are so low on memory on all allowed nodes that we are forced
  1603. * into the second scan of the zonelist.
  1604. *
  1605. * In the second scan we ignore this zonelist cache and exactly
1606. * apply the watermarks to all zones, even if it is slower to do so.
  1607. * We are low on memory in the second scan, and should leave no stone
  1608. * unturned looking for a free page.
  1609. */
  1610. static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
  1611. nodemask_t *allowednodes)
  1612. {
  1613. struct zonelist_cache *zlc; /* cached zonelist speedup info */
  1614. int i; /* index of *z in zonelist zones */
  1615. int n; /* node that zone *z is on */
  1616. zlc = zonelist->zlcache_ptr;
  1617. if (!zlc)
  1618. return 1;
  1619. i = z - zonelist->_zonerefs;
  1620. n = zlc->z_to_n[i];
  1621. /* This zone is worth trying if it is allowed but not full */
  1622. return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
  1623. }
  1624. /*
  1625. * Given 'z' scanning a zonelist, set the corresponding bit in
  1626. * zlc->fullzones, so that subsequent attempts to allocate a page
  1627. * from that zone don't waste time re-examining it.
  1628. */
  1629. static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
  1630. {
  1631. struct zonelist_cache *zlc; /* cached zonelist speedup info */
  1632. int i; /* index of *z in zonelist zones */
  1633. zlc = zonelist->zlcache_ptr;
  1634. if (!zlc)
  1635. return;
  1636. i = z - zonelist->_zonerefs;
  1637. set_bit(i, zlc->fullzones);
  1638. }
  1639. /*
  1640. * clear all zones full, called after direct reclaim makes progress so that
  1641. * a zone that was recently full is not skipped over for up to a second
  1642. */
  1643. static void zlc_clear_zones_full(struct zonelist *zonelist)
  1644. {
  1645. struct zonelist_cache *zlc; /* cached zonelist speedup info */
  1646. zlc = zonelist->zlcache_ptr;
  1647. if (!zlc)
  1648. return;
  1649. bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
  1650. }
  1651. static bool zone_local(struct zone *local_zone, struct zone *zone)
  1652. {
  1653. return local_zone->node == zone->node;
  1654. }
  1655. static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  1656. {
  1657. return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
  1658. RECLAIM_DISTANCE;
  1659. }
  1660. #else /* CONFIG_NUMA */
  1661. static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
  1662. {
  1663. return NULL;
  1664. }
  1665. static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
  1666. nodemask_t *allowednodes)
  1667. {
  1668. return 1;
  1669. }
  1670. static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
  1671. {
  1672. }
  1673. static void zlc_clear_zones_full(struct zonelist *zonelist)
  1674. {
  1675. }
  1676. static bool zone_local(struct zone *local_zone, struct zone *zone)
  1677. {
  1678. return true;
  1679. }
  1680. static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  1681. {
  1682. return true;
  1683. }
  1684. #endif /* CONFIG_NUMA */
  1685. static void reset_alloc_batches(struct zone *preferred_zone)
  1686. {
  1687. struct zone *zone = preferred_zone->zone_pgdat->node_zones;
  1688. do {
  1689. mod_zone_page_state(zone, NR_ALLOC_BATCH,
  1690. high_wmark_pages(zone) - low_wmark_pages(zone) -
  1691. atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
  1692. clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
  1693. } while (zone++ != preferred_zone);
  1694. }
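/*
 * Example of the adjustment above (hypothetical watermarks): for a zone
 * with high_wmark == 6000 pages, low_wmark == 5000 pages and a drained
 * NR_ALLOC_BATCH of -50, the delta applied is 6000 - 5000 - (-50) = 1050,
 * which tops the batch back up to exactly high - low = 1000 for the next
 * fair-zone allocation round.
 */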
  1695. /*
  1696. * get_page_from_freelist goes through the zonelist trying to allocate
  1697. * a page.
  1698. */
  1699. static struct page *
  1700. get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
  1701. struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
  1702. struct zone *preferred_zone, int classzone_idx, int migratetype)
  1703. {
  1704. struct zoneref *z;
  1705. struct page *page = NULL;
  1706. struct zone *zone;
  1707. nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
  1708. int zlc_active = 0; /* set if using zonelist_cache */
  1709. int did_zlc_setup = 0; /* just call zlc_setup() one time */
  1710. bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
  1711. (gfp_mask & __GFP_WRITE);
  1712. int nr_fair_skipped = 0;
  1713. bool zonelist_rescan;
  1714. zonelist_scan:
  1715. zonelist_rescan = false;
  1716. /*
  1717. * Scan zonelist, looking for a zone with enough free.
  1718. * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
  1719. */
  1720. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  1721. high_zoneidx, nodemask) {
  1722. unsigned long mark;
  1723. if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
  1724. !zlc_zone_worth_trying(zonelist, z, allowednodes))
  1725. continue;
  1726. if (cpusets_enabled() &&
  1727. (alloc_flags & ALLOC_CPUSET) &&
  1728. !cpuset_zone_allowed_softwall(zone, gfp_mask))
  1729. continue;
  1730. /*
  1731. * Distribute pages in proportion to the individual
  1732. * zone size to ensure fair page aging. The zone a
  1733. * page was allocated in should have no effect on the
  1734. * time the page has in memory before being reclaimed.
  1735. */
  1736. if (alloc_flags & ALLOC_FAIR) {
  1737. if (!zone_local(preferred_zone, zone))
  1738. break;
  1739. if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
  1740. nr_fair_skipped++;
  1741. continue;
  1742. }
  1743. }
  1744. /*
  1745. * When allocating a page cache page for writing, we
  1746. * want to get it from a zone that is within its dirty
  1747. * limit, such that no single zone holds more than its
  1748. * proportional share of globally allowed dirty pages.
  1749. * The dirty limits take into account the zone's
  1750. * lowmem reserves and high watermark so that kswapd
  1751. * should be able to balance it without having to
  1752. * write pages from its LRU list.
  1753. *
  1754. * This may look like it could increase pressure on
  1755. * lower zones by failing allocations in higher zones
  1756. * before they are full. But the pages that do spill
  1757. * over are limited as the lower zones are protected
  1758. * by this very same mechanism. It should not become
  1759. * a practical burden to them.
  1760. *
  1761. * XXX: For now, allow allocations to potentially
  1762. * exceed the per-zone dirty limit in the slowpath
  1763. * (ALLOC_WMARK_LOW unset) before going into reclaim,
  1764. * which is important when on a NUMA setup the allowed
  1765. * zones are together not big enough to reach the
  1766. * global limit. The proper fix for these situations
  1767. * will require awareness of zones in the
  1768. * dirty-throttling and the flusher threads.
  1769. */
  1770. if (consider_zone_dirty && !zone_dirty_ok(zone))
  1771. continue;
  1772. mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
  1773. if (!zone_watermark_ok(zone, order, mark,
  1774. classzone_idx, alloc_flags)) {
  1775. int ret;
  1776. /* Checked here to keep the fast path fast */
  1777. BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
  1778. if (alloc_flags & ALLOC_NO_WATERMARKS)
  1779. goto try_this_zone;
  1780. if (IS_ENABLED(CONFIG_NUMA) &&
  1781. !did_zlc_setup && nr_online_nodes > 1) {
  1782. /*
  1783. * we do zlc_setup if there are multiple nodes
  1784. * and before considering the first zone allowed
  1785. * by the cpuset.
  1786. */
  1787. allowednodes = zlc_setup(zonelist, alloc_flags);
  1788. zlc_active = 1;
  1789. did_zlc_setup = 1;
  1790. }
  1791. if (zone_reclaim_mode == 0 ||
  1792. !zone_allows_reclaim(preferred_zone, zone))
  1793. goto this_zone_full;
  1794. /*
  1795. * As we may have just activated ZLC, check if the first
  1796. * eligible zone has failed zone_reclaim recently.
  1797. */
  1798. if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
  1799. !zlc_zone_worth_trying(zonelist, z, allowednodes))
  1800. continue;
  1801. ret = zone_reclaim(zone, gfp_mask, order);
  1802. switch (ret) {
  1803. case ZONE_RECLAIM_NOSCAN:
  1804. /* did not scan */
  1805. continue;
  1806. case ZONE_RECLAIM_FULL:
  1807. /* scanned but unreclaimable */
  1808. continue;
  1809. default:
  1810. /* did we reclaim enough */
  1811. if (zone_watermark_ok(zone, order, mark,
  1812. classzone_idx, alloc_flags))
  1813. goto try_this_zone;
  1814. /*
  1815. * Failed to reclaim enough to meet watermark.
  1816. * Only mark the zone full if checking the min
  1817. * watermark or if we failed to reclaim just
  1818. * 1<<order pages or else the page allocator
  1819. * fastpath will prematurely mark zones full
  1820. * when the watermark is between the low and
  1821. * min watermarks.
  1822. */
  1823. if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
  1824. ret == ZONE_RECLAIM_SOME)
  1825. goto this_zone_full;
  1826. continue;
  1827. }
  1828. }
  1829. try_this_zone:
  1830. page = buffered_rmqueue(preferred_zone, zone, order,
  1831. gfp_mask, migratetype);
  1832. if (page)
  1833. break;
  1834. this_zone_full:
  1835. if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
  1836. zlc_mark_zone_full(zonelist, z);
  1837. }
  1838. if (page) {
  1839. /*
  1840. * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
  1841. * necessary to allocate the page. The expectation is
  1842. * that the caller is taking steps that will free more
  1843. * memory. The caller should avoid the page being used
  1844. * for !PFMEMALLOC purposes.
  1845. */
  1846. page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
  1847. return page;
  1848. }
  1849. /*
  1850. * The first pass makes sure allocations are spread fairly within the
  1851. * local node. However, the local node might have free pages left
  1852. * after the fairness batches are exhausted, and remote zones haven't
  1853. * even been considered yet. Try once more without fairness, and
  1854. * include remote zones now, before entering the slowpath and waking
  1855. * kswapd: prefer spilling to a remote zone over swapping locally.
  1856. */
  1857. if (alloc_flags & ALLOC_FAIR) {
  1858. alloc_flags &= ~ALLOC_FAIR;
  1859. if (nr_fair_skipped) {
  1860. zonelist_rescan = true;
  1861. reset_alloc_batches(preferred_zone);
  1862. }
  1863. if (nr_online_nodes > 1)
  1864. zonelist_rescan = true;
  1865. }
  1866. if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
  1867. /* Disable zlc cache for second zonelist scan */
  1868. zlc_active = 0;
  1869. zonelist_rescan = true;
  1870. }
  1871. if (zonelist_rescan)
  1872. goto zonelist_scan;
  1873. return NULL;
  1874. }
  1875. /*
  1876. * Large machines with many possible nodes should not always dump per-node
  1877. * meminfo in irq context.
  1878. */
  1879. static inline bool should_suppress_show_mem(void)
  1880. {
  1881. bool ret = false;
  1882. #if NODES_SHIFT > 8
  1883. ret = in_interrupt();
  1884. #endif
  1885. return ret;
  1886. }
  1887. static DEFINE_RATELIMIT_STATE(nopage_rs,
  1888. DEFAULT_RATELIMIT_INTERVAL,
  1889. DEFAULT_RATELIMIT_BURST);
  1890. void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
  1891. {
  1892. unsigned int filter = SHOW_MEM_FILTER_NODES;
  1893. if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
  1894. debug_guardpage_minorder() > 0)
  1895. return;
  1896. /*
  1897. * This documents exceptions given to allocations in certain
  1898. * contexts that are allowed to allocate outside current's set
  1899. * of allowed nodes.
  1900. */
  1901. if (!(gfp_mask & __GFP_NOMEMALLOC))
  1902. if (test_thread_flag(TIF_MEMDIE) ||
  1903. (current->flags & (PF_MEMALLOC | PF_EXITING)))
  1904. filter &= ~SHOW_MEM_FILTER_NODES;
  1905. if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
  1906. filter &= ~SHOW_MEM_FILTER_NODES;
  1907. if (fmt) {
  1908. struct va_format vaf;
  1909. va_list args;
  1910. va_start(args, fmt);
  1911. vaf.fmt = fmt;
  1912. vaf.va = &args;
  1913. pr_warn("%pV", &vaf);
  1914. va_end(args);
  1915. }
  1916. pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
  1917. current->comm, order, gfp_mask);
  1918. dump_stack();
  1919. if (!should_suppress_show_mem())
  1920. show_mem(filter);
  1921. }
  1922. static inline int
  1923. should_alloc_retry(gfp_t gfp_mask, unsigned int order,
  1924. unsigned long did_some_progress,
  1925. unsigned long pages_reclaimed)
  1926. {
  1927. /* Do not loop if specifically requested */
  1928. if (gfp_mask & __GFP_NORETRY)
  1929. return 0;
  1930. /* Always retry if specifically requested */
  1931. if (gfp_mask & __GFP_NOFAIL)
  1932. return 1;
  1933. /*
  1934. * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
  1935. * making forward progress without invoking OOM. Suspend also disables
  1936. * storage devices so kswapd will not help. Bail if we are suspending.
  1937. */
  1938. if (!did_some_progress && pm_suspended_storage())
  1939. return 0;
  1940. /*
  1941. * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
  1942. * means __GFP_NOFAIL, but that may not be true in other
  1943. * implementations.
  1944. */
  1945. if (order <= PAGE_ALLOC_COSTLY_ORDER)
  1946. return 1;
  1947. /*
  1948. * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
  1949. * specified, then we retry until we no longer reclaim any pages
  1950. * (above), or we've reclaimed an order of pages at least as
  1951. * large as the allocation's order. In both cases, if the
  1952. * allocation still fails, we stop retrying.
  1953. */
  1954. if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
  1955. return 1;
  1956. return 0;
  1957. }
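/*
 * Example decisions for the helper above: a GFP_KERNEL order-0 request
 * (no __GFP_NORETRY or __GFP_NOFAIL) keeps retrying because
 * order <= PAGE_ALLOC_COSTLY_ORDER, except when storage is suspended and
 * reclaim made no progress. An order-4 request without __GFP_REPEAT
 * gives up here, while with __GFP_REPEAT it retries until at least
 * 1 << 4 == 16 pages have been reclaimed in total.
 */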
  1958. static inline struct page *
  1959. __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
  1960. struct zonelist *zonelist, enum zone_type high_zoneidx,
  1961. nodemask_t *nodemask, struct zone *preferred_zone,
  1962. int classzone_idx, int migratetype)
  1963. {
  1964. struct page *page;
  1965. /* Acquire the per-zone oom lock for each zone */
  1966. if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
  1967. schedule_timeout_uninterruptible(1);
  1968. return NULL;
  1969. }
  1970. /*
  1971. * PM-freezer should be notified that there might be an OOM killer on
  1972. * its way to kill and wake somebody up. This is too early and we might
  1973. * end up not killing anything but false positives are acceptable.
  1974. * See freeze_processes.
  1975. */
  1976. note_oom_kill();
  1977. /*
  1978. * Go through the zonelist yet one more time, keep very high watermark
  1979. * here, this is only to catch a parallel oom killing, we must fail if
  1980. * we're still under heavy pressure.
  1981. */
  1982. page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
  1983. order, zonelist, high_zoneidx,
  1984. ALLOC_WMARK_HIGH|ALLOC_CPUSET,
  1985. preferred_zone, classzone_idx, migratetype);
  1986. if (page)
  1987. goto out;
  1988. if (!(gfp_mask & __GFP_NOFAIL)) {
  1989. /* The OOM killer will not help higher order allocs */
  1990. if (order > PAGE_ALLOC_COSTLY_ORDER)
  1991. goto out;
  1992. /* The OOM killer does not needlessly kill tasks for lowmem */
  1993. if (high_zoneidx < ZONE_NORMAL)
  1994. goto out;
  1995. /*
  1996. * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
  1997. * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
  1998. * The caller should handle page allocation failure by itself if
  1999. * it specifies __GFP_THISNODE.
  2000. * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
  2001. */
  2002. if (gfp_mask & __GFP_THISNODE)
  2003. goto out;
  2004. }
  2005. /* Exhausted what can be done so it's blamo time */
  2006. out_of_memory(zonelist, gfp_mask, order, nodemask, false);
  2007. out:
  2008. oom_zonelist_unlock(zonelist, gfp_mask);
  2009. return page;
  2010. }
  2011. #ifdef CONFIG_COMPACTION
  2012. /* Try memory compaction for high-order allocations before reclaim */
  2013. static struct page *
  2014. __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
  2015. struct zonelist *zonelist, enum zone_type high_zoneidx,
  2016. nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
  2017. int classzone_idx, int migratetype, enum migrate_mode mode,
  2018. int *contended_compaction, bool *deferred_compaction)
  2019. {
  2020. unsigned long compact_result;
  2021. struct page *page;
  2022. if (!order)
  2023. return NULL;
  2024. current->flags |= PF_MEMALLOC;
  2025. compact_result = try_to_compact_pages(zonelist, order, gfp_mask,
  2026. nodemask, mode,
  2027. contended_compaction,
  2028. alloc_flags, classzone_idx);
  2029. current->flags &= ~PF_MEMALLOC;
  2030. switch (compact_result) {
  2031. case COMPACT_DEFERRED:
  2032. *deferred_compaction = true;
  2033. /* fall-through */
  2034. case COMPACT_SKIPPED:
  2035. return NULL;
  2036. default:
  2037. break;
  2038. }
  2039. /*
  2040. * At least in one zone compaction wasn't deferred or skipped, so let's
  2041. * count a compaction stall
  2042. */
  2043. count_vm_event(COMPACTSTALL);
  2044. page = get_page_from_freelist(gfp_mask, nodemask,
  2045. order, zonelist, high_zoneidx,
  2046. alloc_flags & ~ALLOC_NO_WATERMARKS,
  2047. preferred_zone, classzone_idx, migratetype);
  2048. if (page) {
  2049. struct zone *zone = page_zone(page);
  2050. zone->compact_blockskip_flush = false;
  2051. compaction_defer_reset(zone, order, true);
  2052. count_vm_event(COMPACTSUCCESS);
  2053. return page;
  2054. }
  2055. /*
  2056. * It's bad if compaction run occurs and fails. The most likely reason
  2057. * is that pages exist, but not enough to satisfy watermarks.
  2058. */
  2059. count_vm_event(COMPACTFAIL);
  2060. cond_resched();
  2061. return NULL;
  2062. }
  2063. #else
  2064. static inline struct page *
  2065. __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
  2066. struct zonelist *zonelist, enum zone_type high_zoneidx,
  2067. nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
  2068. int classzone_idx, int migratetype, enum migrate_mode mode,
  2069. int *contended_compaction, bool *deferred_compaction)
  2070. {
  2071. return NULL;
  2072. }
  2073. #endif /* CONFIG_COMPACTION */
  2074. /* Perform direct synchronous page reclaim */
  2075. static int
  2076. __perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
  2077. nodemask_t *nodemask)
  2078. {
  2079. struct reclaim_state reclaim_state;
  2080. int progress;
  2081. cond_resched();
  2082. /* We now go into synchronous reclaim */
  2083. cpuset_memory_pressure_bump();
  2084. current->flags |= PF_MEMALLOC;
  2085. lockdep_set_current_reclaim_state(gfp_mask);
  2086. reclaim_state.reclaimed_slab = 0;
  2087. current->reclaim_state = &reclaim_state;
  2088. progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
  2089. current->reclaim_state = NULL;
  2090. lockdep_clear_current_reclaim_state();
  2091. current->flags &= ~PF_MEMALLOC;
  2092. cond_resched();
  2093. return progress;
  2094. }
  2095. /* The really slow allocator path where we enter direct reclaim */
  2096. static inline struct page *
  2097. __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
  2098. struct zonelist *zonelist, enum zone_type high_zoneidx,
  2099. nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
  2100. int classzone_idx, int migratetype, unsigned long *did_some_progress)
  2101. {
  2102. struct page *page = NULL;
  2103. bool drained = false;
  2104. *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
  2105. nodemask);
  2106. if (unlikely(!(*did_some_progress)))
  2107. return NULL;
  2108. /* After successful reclaim, reconsider all zones for allocation */
  2109. if (IS_ENABLED(CONFIG_NUMA))
  2110. zlc_clear_zones_full(zonelist);
  2111. retry:
  2112. page = get_page_from_freelist(gfp_mask, nodemask, order,
  2113. zonelist, high_zoneidx,
  2114. alloc_flags & ~ALLOC_NO_WATERMARKS,
  2115. preferred_zone, classzone_idx,
  2116. migratetype);
  2117. /*
  2118. * If an allocation failed after direct reclaim, it could be because
  2119. * pages are pinned on the per-cpu lists. Drain them and try again
  2120. */
  2121. if (!page && !drained) {
  2122. drain_all_pages(NULL);
  2123. drained = true;
  2124. goto retry;
  2125. }
  2126. return page;
  2127. }
  2128. /*
  2129. * This is called in the allocator slow-path if the allocation request is of
  2130. * sufficient urgency to ignore watermarks and take other desperate measures
  2131. */
  2132. static inline struct page *
  2133. __alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
  2134. struct zonelist *zonelist, enum zone_type high_zoneidx,
  2135. nodemask_t *nodemask, struct zone *preferred_zone,
  2136. int classzone_idx, int migratetype)
  2137. {
  2138. struct page *page;
  2139. do {
  2140. page = get_page_from_freelist(gfp_mask, nodemask, order,
  2141. zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
  2142. preferred_zone, classzone_idx, migratetype);
  2143. if (!page && gfp_mask & __GFP_NOFAIL)
  2144. wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
  2145. } while (!page && (gfp_mask & __GFP_NOFAIL));
  2146. return page;
  2147. }
  2148. static void wake_all_kswapds(unsigned int order,
  2149. struct zonelist *zonelist,
  2150. enum zone_type high_zoneidx,
  2151. struct zone *preferred_zone,
  2152. nodemask_t *nodemask)
  2153. {
  2154. struct zoneref *z;
  2155. struct zone *zone;
  2156. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  2157. high_zoneidx, nodemask)
  2158. wakeup_kswapd(zone, order, zone_idx(preferred_zone));
  2159. }
  2160. static inline int
  2161. gfp_to_alloc_flags(gfp_t gfp_mask)
  2162. {
  2163. int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
  2164. const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));
  2165. /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
  2166. BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
  2167. /*
  2168. * The caller may dip into page reserves a bit more if the caller
  2169. * cannot run direct reclaim, or if the caller has realtime scheduling
  2170. * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
  2171. * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
  2172. */
  2173. alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
  2174. if (atomic) {
  2175. /*
  2176. * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
  2177. * if it can't schedule.
  2178. */
  2179. if (!(gfp_mask & __GFP_NOMEMALLOC))
  2180. alloc_flags |= ALLOC_HARDER;
  2181. /*
  2182. * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
  2183. * comment for __cpuset_node_allowed_softwall().
  2184. */
  2185. alloc_flags &= ~ALLOC_CPUSET;
  2186. } else if (unlikely(rt_task(current)) && !in_interrupt())
  2187. alloc_flags |= ALLOC_HARDER;
  2188. if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
  2189. if (gfp_mask & __GFP_MEMALLOC)
  2190. alloc_flags |= ALLOC_NO_WATERMARKS;
  2191. else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
  2192. alloc_flags |= ALLOC_NO_WATERMARKS;
  2193. else if (!in_interrupt() &&
  2194. ((current->flags & PF_MEMALLOC) ||
  2195. unlikely(test_thread_flag(TIF_MEMDIE))))
  2196. alloc_flags |= ALLOC_NO_WATERMARKS;
  2197. }
  2198. #ifdef CONFIG_CMA
  2199. if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
  2200. alloc_flags |= ALLOC_CMA;
  2201. #endif
  2202. return alloc_flags;
  2203. }
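/*
 * Worked example for the mapping above: GFP_ATOMIC is __GFP_HIGH without
 * __GFP_WAIT, so "atomic" is true. Starting from
 * ALLOC_WMARK_MIN | ALLOC_CPUSET, the __GFP_HIGH bit adds ALLOC_HIGH,
 * the atomic case adds ALLOC_HARDER (no __GFP_NOMEMALLOC set) and then
 * clears ALLOC_CPUSET, ending up with
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER.
 */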
  2204. bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
  2205. {
  2206. return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
  2207. }
  2208. static inline struct page *
  2209. __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
  2210. struct zonelist *zonelist, enum zone_type high_zoneidx,
  2211. nodemask_t *nodemask, struct zone *preferred_zone,
  2212. int classzone_idx, int migratetype)
  2213. {
  2214. const gfp_t wait = gfp_mask & __GFP_WAIT;
  2215. struct page *page = NULL;
  2216. int alloc_flags;
  2217. unsigned long pages_reclaimed = 0;
  2218. unsigned long did_some_progress;
  2219. enum migrate_mode migration_mode = MIGRATE_ASYNC;
  2220. bool deferred_compaction = false;
  2221. int contended_compaction = COMPACT_CONTENDED_NONE;
  2222. /*
  2223. * In the slowpath, we sanity check order to avoid ever trying to
  2224. * reclaim >= MAX_ORDER areas which will never succeed. Callers may
  2225. * be using allocators in order of preference for an area that is
  2226. * too large.
  2227. */
  2228. if (order >= MAX_ORDER) {
  2229. WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
  2230. return NULL;
  2231. }
  2232. /*
  2233. * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
  2234. * __GFP_NOWARN set) should not cause reclaim since the subsystem
  2235. * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
  2236. * using a larger set of nodes after it has established that the
  2237. * allowed per node queues are empty and that nodes are
  2238. * over allocated.
  2239. */
  2240. if (IS_ENABLED(CONFIG_NUMA) &&
  2241. (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
  2242. goto nopage;
  2243. restart:
  2244. if (!(gfp_mask & __GFP_NO_KSWAPD))
  2245. wake_all_kswapds(order, zonelist, high_zoneidx,
  2246. preferred_zone, nodemask);
  2247. /*
  2248. * OK, we're below the kswapd watermark and have kicked background
  2249. * reclaim. Now things get more complex, so set up alloc_flags according
  2250. * to how we want to proceed.
  2251. */
  2252. alloc_flags = gfp_to_alloc_flags(gfp_mask);
  2253. /*
  2254. * Find the true preferred zone if the allocation is unconstrained by
  2255. * cpusets.
  2256. */
  2257. if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
  2258. struct zoneref *preferred_zoneref;
  2259. preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
  2260. NULL, &preferred_zone);
  2261. classzone_idx = zonelist_zone_idx(preferred_zoneref);
  2262. }
  2263. rebalance:
  2264. /* This is the last chance, in general, before the goto nopage. */
  2265. page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
  2266. high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
  2267. preferred_zone, classzone_idx, migratetype);
  2268. if (page)
  2269. goto got_pg;
  2270. /* Allocate without watermarks if the context allows */
  2271. if (alloc_flags & ALLOC_NO_WATERMARKS) {
  2272. /*
  2273. * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
2274. * the allocation is high priority and this type of
2275. * allocation is system rather than user oriented
  2276. */
  2277. zonelist = node_zonelist(numa_node_id(), gfp_mask);
  2278. page = __alloc_pages_high_priority(gfp_mask, order,
  2279. zonelist, high_zoneidx, nodemask,
  2280. preferred_zone, classzone_idx, migratetype);
  2281. if (page) {
  2282. goto got_pg;
  2283. }
  2284. }
  2285. /* Atomic allocations - we can't balance anything */
  2286. if (!wait) {
  2287. /*
  2288. * All existing users of the deprecated __GFP_NOFAIL are
  2289. * blockable, so warn of any new users that actually allow this
  2290. * type of allocation to fail.
  2291. */
  2292. WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
  2293. goto nopage;
  2294. }
  2295. /* Avoid recursion of direct reclaim */
  2296. if (current->flags & PF_MEMALLOC)
  2297. goto nopage;
  2298. /* Avoid allocations with no watermarks from looping endlessly */
  2299. if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
  2300. goto nopage;
  2301. /*
  2302. * Try direct compaction. The first pass is asynchronous. Subsequent
  2303. * attempts after direct reclaim are synchronous
  2304. */
  2305. page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
  2306. high_zoneidx, nodemask, alloc_flags,
  2307. preferred_zone,
  2308. classzone_idx, migratetype,
  2309. migration_mode, &contended_compaction,
  2310. &deferred_compaction);
  2311. if (page)
  2312. goto got_pg;
  2313. /* Checks for THP-specific high-order allocations */
  2314. if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) {
  2315. /*
  2316. * If compaction is deferred for high-order allocations, it is
  2317. * because sync compaction recently failed. If this is the case
  2318. * and the caller requested a THP allocation, we do not want
  2319. * to heavily disrupt the system, so we fail the allocation
  2320. * instead of entering direct reclaim.
  2321. */
  2322. if (deferred_compaction)
  2323. goto nopage;
  2324. /*
  2325. * In all zones where compaction was attempted (and not
  2326. * deferred or skipped), lock contention has been detected.
2327. * For THP allocations we do not want to disrupt others,
2328. * so we fall back to base pages instead.
  2329. */
  2330. if (contended_compaction == COMPACT_CONTENDED_LOCK)
  2331. goto nopage;
  2332. /*
  2333. * If compaction was aborted due to need_resched(), we do not
  2334. * want to further increase allocation latency, unless it is
  2335. * khugepaged trying to collapse.
  2336. */
  2337. if (contended_compaction == COMPACT_CONTENDED_SCHED
  2338. && !(current->flags & PF_KTHREAD))
  2339. goto nopage;
  2340. }
  2341. /*
  2342. * It can become very expensive to allocate transparent hugepages at
  2343. * fault, so use asynchronous memory compaction for THP unless it is
  2344. * khugepaged trying to collapse.
  2345. */
  2346. if ((gfp_mask & GFP_TRANSHUGE) != GFP_TRANSHUGE ||
  2347. (current->flags & PF_KTHREAD))
  2348. migration_mode = MIGRATE_SYNC_LIGHT;
  2349. /* Try direct reclaim and then allocating */
  2350. page = __alloc_pages_direct_reclaim(gfp_mask, order,
  2351. zonelist, high_zoneidx,
  2352. nodemask,
  2353. alloc_flags, preferred_zone,
  2354. classzone_idx, migratetype,
  2355. &did_some_progress);
  2356. if (page)
  2357. goto got_pg;
  2358. /*
  2359. * If we failed to make any progress reclaiming, then we are
  2360. * running out of options and have to consider going OOM
  2361. */
  2362. if (!did_some_progress) {
  2363. if (oom_gfp_allowed(gfp_mask)) {
  2364. if (oom_killer_disabled)
  2365. goto nopage;
  2366. /* Coredumps can quickly deplete all memory reserves */
  2367. if ((current->flags & PF_DUMPCORE) &&
  2368. !(gfp_mask & __GFP_NOFAIL))
  2369. goto nopage;
  2370. page = __alloc_pages_may_oom(gfp_mask, order,
  2371. zonelist, high_zoneidx,
  2372. nodemask, preferred_zone,
  2373. classzone_idx, migratetype);
  2374. if (page)
  2375. goto got_pg;
  2376. if (!(gfp_mask & __GFP_NOFAIL)) {
  2377. /*
  2378. * The oom killer is not called for high-order
  2379. * allocations that may fail, so if no progress
  2380. * is being made, there are no other options and
  2381. * retrying is unlikely to help.
  2382. */
  2383. if (order > PAGE_ALLOC_COSTLY_ORDER)
  2384. goto nopage;
  2385. /*
  2386. * The oom killer is not called for lowmem
  2387. * allocations to prevent needlessly killing
  2388. * innocent tasks.
  2389. */
  2390. if (high_zoneidx < ZONE_NORMAL)
  2391. goto nopage;
  2392. }
  2393. goto restart;
  2394. }
  2395. }
  2396. /* Check if we should retry the allocation */
  2397. pages_reclaimed += did_some_progress;
  2398. if (should_alloc_retry(gfp_mask, order, did_some_progress,
  2399. pages_reclaimed)) {
  2400. /* Wait for some write requests to complete then retry */
  2401. wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
  2402. goto rebalance;
  2403. } else {
  2404. /*
  2405. * High-order allocations do not necessarily loop after
  2406. * direct reclaim and reclaim/compaction depends on compaction
  2407. * being called after reclaim so call directly if necessary
  2408. */
  2409. page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
  2410. high_zoneidx, nodemask, alloc_flags,
  2411. preferred_zone,
  2412. classzone_idx, migratetype,
  2413. migration_mode, &contended_compaction,
  2414. &deferred_compaction);
  2415. if (page)
  2416. goto got_pg;
  2417. }
  2418. nopage:
  2419. warn_alloc_failed(gfp_mask, order, NULL);
  2420. return page;
  2421. got_pg:
  2422. if (kmemcheck_enabled)
  2423. kmemcheck_pagealloc_alloc(page, order, gfp_mask);
  2424. return page;
  2425. }
  2426. /*
  2427. * This is the 'heart' of the zoned buddy allocator.
  2428. */
  2429. struct page *
  2430. __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
  2431. struct zonelist *zonelist, nodemask_t *nodemask)
  2432. {
  2433. enum zone_type high_zoneidx = gfp_zone(gfp_mask);
  2434. struct zone *preferred_zone;
  2435. struct zoneref *preferred_zoneref;
  2436. struct page *page = NULL;
  2437. int migratetype = gfpflags_to_migratetype(gfp_mask);
  2438. unsigned int cpuset_mems_cookie;
  2439. int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
  2440. int classzone_idx;
  2441. gfp_mask &= gfp_allowed_mask;
  2442. lockdep_trace_alloc(gfp_mask);
  2443. might_sleep_if(gfp_mask & __GFP_WAIT);
  2444. if (should_fail_alloc_page(gfp_mask, order))
  2445. return NULL;
  2446. /*
  2447. * Check the zones suitable for the gfp_mask contain at least one
  2448. * valid zone. It's possible to have an empty zonelist as a result
  2449. * of GFP_THISNODE and a memoryless node
  2450. */
  2451. if (unlikely(!zonelist->_zonerefs->zone))
  2452. return NULL;
  2453. if (IS_ENABLED(CONFIG_CMA) && migratetype == MIGRATE_MOVABLE)
  2454. alloc_flags |= ALLOC_CMA;
  2455. retry_cpuset:
  2456. cpuset_mems_cookie = read_mems_allowed_begin();
  2457. /* The preferred zone is used for statistics later */
  2458. preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
  2459. nodemask ? : &cpuset_current_mems_allowed,
  2460. &preferred_zone);
  2461. if (!preferred_zone)
  2462. goto out;
  2463. classzone_idx = zonelist_zone_idx(preferred_zoneref);
  2464. /* First allocation attempt */
  2465. page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
  2466. zonelist, high_zoneidx, alloc_flags,
  2467. preferred_zone, classzone_idx, migratetype);
  2468. if (unlikely(!page)) {
  2469. /*
  2470. * Runtime PM, block IO and its error handling path
  2471. * can deadlock because I/O on the device might not
  2472. * complete.
  2473. */
  2474. gfp_mask = memalloc_noio_flags(gfp_mask);
  2475. page = __alloc_pages_slowpath(gfp_mask, order,
  2476. zonelist, high_zoneidx, nodemask,
  2477. preferred_zone, classzone_idx, migratetype);
  2478. }
  2479. trace_mm_page_alloc(page, order, gfp_mask, migratetype);
  2480. out:
  2481. /*
  2482. * When updating a task's mems_allowed, it is possible to race with
  2483. * parallel threads in such a way that an allocation can fail while
  2484. * the mask is being updated. If a page allocation is about to fail,
  2485. * check if the cpuset changed during allocation and if so, retry.
  2486. */
  2487. if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
  2488. goto retry_cpuset;
  2489. return page;
  2490. }
  2491. EXPORT_SYMBOL(__alloc_pages_nodemask);
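/*
 * Illustrative sketch, not part of page_alloc.c: how callers typically reach
 * __alloc_pages_nodemask().  alloc_pages(gfp, order) resolves to the current
 * node's zonelist with a NULL nodemask, which is exactly what this function
 * consumes.  The helper name example_alloc_buffer() is hypothetical; the
 * headers it needs (linux/gfp.h, linux/mm.h) are already included here.
 */
static struct page *example_alloc_buffer(void)
{
	/* order-2 request: four contiguous pages; GFP_KERNEL may sleep and reclaim */
	struct page *page = alloc_pages(GFP_KERNEL, 2);

	if (!page)
		return NULL;	/* even the slowpath (reclaim/compaction/OOM) failed */

	/* release later with __free_pages(page, 2) */
	return page;
}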
  2492. /*
  2493. * Common helper functions.
  2494. */
  2495. unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
  2496. {
  2497. struct page *page;
  2498. /*
  2499. * __get_free_pages() returns a 32-bit address, which cannot represent
  2500. * a highmem page
  2501. */
  2502. VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
  2503. page = alloc_pages(gfp_mask, order);
  2504. if (!page)
  2505. return 0;
  2506. return (unsigned long) page_address(page);
  2507. }
  2508. EXPORT_SYMBOL(__get_free_pages);
  2509. unsigned long get_zeroed_page(gfp_t gfp_mask)
  2510. {
  2511. return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
  2512. }
  2513. EXPORT_SYMBOL(get_zeroed_page);
  2514. void __free_pages(struct page *page, unsigned int order)
  2515. {
  2516. if (put_page_testzero(page)) {
  2517. if (order == 0)
  2518. free_hot_cold_page(page, false);
  2519. else
  2520. __free_pages_ok(page, order);
  2521. }
  2522. }
  2523. EXPORT_SYMBOL(__free_pages);
  2524. void free_pages(unsigned long addr, unsigned int order)
  2525. {
  2526. if (addr != 0) {
  2527. VM_BUG_ON(!virt_addr_valid((void *)addr));
  2528. __free_pages(virt_to_page((void *)addr), order);
  2529. }
  2530. }
  2531. EXPORT_SYMBOL(free_pages);
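/*
 * Illustrative sketch, not part of page_alloc.c: the address-based helpers
 * above pair __get_free_pages()/get_zeroed_page() with free_pages().  The
 * helper name example_scratch_page() is hypothetical.
 */
static int example_scratch_page(void)
{
	/* one zeroed page, returned as a kernel virtual address */
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;

	/* ... use the page ... */

	free_pages(addr, 0);	/* the order must match the allocation (0 here) */
	return 0;
}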
  2532. /*
  2533. * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
  2534. * of the current memory cgroup.
  2535. *
  2536. * It should be used when the caller would like to use kmalloc, but since the
  2537. * allocation is large, it has to fall back to the page allocator.
  2538. */
  2539. struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
  2540. {
  2541. struct page *page;
  2542. struct mem_cgroup *memcg = NULL;
  2543. if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
  2544. return NULL;
  2545. page = alloc_pages(gfp_mask, order);
  2546. memcg_kmem_commit_charge(page, memcg, order);
  2547. return page;
  2548. }
  2549. struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
  2550. {
  2551. struct page *page;
  2552. struct mem_cgroup *memcg = NULL;
  2553. if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
  2554. return NULL;
  2555. page = alloc_pages_node(nid, gfp_mask, order);
  2556. memcg_kmem_commit_charge(page, memcg, order);
  2557. return page;
  2558. }
  2559. /*
  2560. * __free_kmem_pages and free_kmem_pages will free pages allocated with
  2561. * alloc_kmem_pages.
  2562. */
  2563. void __free_kmem_pages(struct page *page, unsigned int order)
  2564. {
  2565. memcg_kmem_uncharge_pages(page, order);
  2566. __free_pages(page, order);
  2567. }
  2568. void free_kmem_pages(unsigned long addr, unsigned int order)
  2569. {
  2570. if (addr != 0) {
  2571. VM_BUG_ON(!virt_addr_valid((void *)addr));
  2572. __free_kmem_pages(virt_to_page((void *)addr), order);
  2573. }
  2574. }
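/*
 * Illustrative sketch, not part of page_alloc.c: alloc_kmem_pages() backs
 * kmalloc-style requests that are too large for the slab allocator and must
 * be charged to the current memory cgroup's kmem counter.  The helper name
 * example_kmem_buffer() is hypothetical.
 */
static void *example_kmem_buffer(size_t size)
{
	unsigned int order = get_order(size);
	struct page *page = alloc_kmem_pages(GFP_KERNEL, order);

	if (!page)
		return NULL;

	/* later: __free_kmem_pages(page, order) uncharges and frees the block */
	return page_address(page);
}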
  2575. static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
  2576. {
  2577. if (addr) {
  2578. unsigned long alloc_end = addr + (PAGE_SIZE << order);
  2579. unsigned long used = addr + PAGE_ALIGN(size);
  2580. split_page(virt_to_page((void *)addr), order);
  2581. while (used < alloc_end) {
  2582. free_page(used);
  2583. used += PAGE_SIZE;
  2584. }
  2585. }
  2586. return (void *)addr;
  2587. }
  2588. /**
2589. * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
  2590. * @size: the number of bytes to allocate
  2591. * @gfp_mask: GFP flags for the allocation
  2592. *
  2593. * This function is similar to alloc_pages(), except that it allocates the
  2594. * minimum number of pages to satisfy the request. alloc_pages() can only
  2595. * allocate memory in power-of-two pages.
  2596. *
  2597. * This function is also limited by MAX_ORDER.
  2598. *
  2599. * Memory allocated by this function must be released by free_pages_exact().
  2600. */
  2601. void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
  2602. {
  2603. unsigned int order = get_order(size);
  2604. unsigned long addr;
  2605. addr = __get_free_pages(gfp_mask, order);
  2606. return make_alloc_exact(addr, order, size);
  2607. }
  2608. EXPORT_SYMBOL(alloc_pages_exact);
  2609. /**
  2610. * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
  2611. * pages on a node.
  2612. * @nid: the preferred node ID where memory should be allocated
  2613. * @size: the number of bytes to allocate
  2614. * @gfp_mask: GFP flags for the allocation
  2615. *
  2616. * Like alloc_pages_exact(), but try to allocate on node nid first before falling
  2617. * back.
  2618. * Note this is not alloc_pages_exact_node() which allocates on a specific node,
  2619. * but is not exact.
  2620. */
  2621. void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
  2622. {
  2623. unsigned order = get_order(size);
  2624. struct page *p = alloc_pages_node(nid, gfp_mask, order);
  2625. if (!p)
  2626. return NULL;
  2627. return make_alloc_exact((unsigned long)page_address(p), order, size);
  2628. }
  2629. /**
  2630. * free_pages_exact - release memory allocated via alloc_pages_exact()
  2631. * @virt: the value returned by alloc_pages_exact.
  2632. * @size: size of allocation, same value as passed to alloc_pages_exact().
  2633. *
  2634. * Release the memory allocated by a previous call to alloc_pages_exact.
  2635. */
  2636. void free_pages_exact(void *virt, size_t size)
  2637. {
  2638. unsigned long addr = (unsigned long)virt;
  2639. unsigned long end = addr + PAGE_ALIGN(size);
  2640. while (addr < end) {
  2641. free_page(addr);
  2642. addr += PAGE_SIZE;
  2643. }
  2644. }
  2645. EXPORT_SYMBOL(free_pages_exact);
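/*
 * Illustrative sketch, not part of page_alloc.c: alloc_pages_exact() for a
 * size that is not a power-of-two number of pages.  A 20 KB request rounds
 * up to an order-3 block (32 KB with 4 KB pages); make_alloc_exact() splits
 * the block and frees the trailing three pages, so only five pages remain
 * allocated.  The helper name example_exact_buffer() is hypothetical.
 */
static void *example_exact_buffer(void)
{
	void *buf = alloc_pages_exact(20 * 1024, GFP_KERNEL);

	if (!buf)
		return NULL;

	/* must be released with the same size: free_pages_exact(buf, 20 * 1024) */
	return buf;
}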
  2646. /**
  2647. * nr_free_zone_pages - count number of pages beyond high watermark
  2648. * @offset: The zone index of the highest zone
  2649. *
2650. * nr_free_zone_pages() counts the number of pages which are beyond the
  2651. * high watermark within all zones at or below a given zone index. For each
  2652. * zone, the number of pages is calculated as:
  2653. * managed_pages - high_pages
  2654. */
  2655. static unsigned long nr_free_zone_pages(int offset)
  2656. {
  2657. struct zoneref *z;
  2658. struct zone *zone;
  2659. /* Just pick one node, since fallback list is circular */
  2660. unsigned long sum = 0;
  2661. struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
  2662. for_each_zone_zonelist(zone, z, zonelist, offset) {
  2663. unsigned long size = zone->managed_pages;
  2664. unsigned long high = high_wmark_pages(zone);
  2665. if (size > high)
  2666. sum += size - high;
  2667. }
  2668. return sum;
  2669. }
  2670. /**
  2671. * nr_free_buffer_pages - count number of pages beyond high watermark
  2672. *
  2673. * nr_free_buffer_pages() counts the number of pages which are beyond the high
  2674. * watermark within ZONE_DMA and ZONE_NORMAL.
  2675. */
  2676. unsigned long nr_free_buffer_pages(void)
  2677. {
  2678. return nr_free_zone_pages(gfp_zone(GFP_USER));
  2679. }
  2680. EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
  2681. /**
  2682. * nr_free_pagecache_pages - count number of pages beyond high watermark
  2683. *
  2684. * nr_free_pagecache_pages() counts the number of pages which are beyond the
  2685. * high watermark within all zones.
  2686. */
  2687. unsigned long nr_free_pagecache_pages(void)
  2688. {
  2689. return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
  2690. }
  2691. static inline void show_node(struct zone *zone)
  2692. {
  2693. if (IS_ENABLED(CONFIG_NUMA))
  2694. printk("Node %d ", zone_to_nid(zone));
  2695. }
  2696. void si_meminfo(struct sysinfo *val)
  2697. {
  2698. val->totalram = totalram_pages;
  2699. val->sharedram = global_page_state(NR_SHMEM);
  2700. val->freeram = global_page_state(NR_FREE_PAGES);
  2701. val->bufferram = nr_blockdev_pages();
  2702. val->totalhigh = totalhigh_pages;
  2703. val->freehigh = nr_free_highpages();
  2704. val->mem_unit = PAGE_SIZE;
  2705. }
  2706. EXPORT_SYMBOL(si_meminfo);
  2707. #ifdef CONFIG_NUMA
  2708. void si_meminfo_node(struct sysinfo *val, int nid)
  2709. {
  2710. int zone_type; /* needs to be signed */
  2711. unsigned long managed_pages = 0;
  2712. pg_data_t *pgdat = NODE_DATA(nid);
  2713. for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
  2714. managed_pages += pgdat->node_zones[zone_type].managed_pages;
  2715. val->totalram = managed_pages;
  2716. val->sharedram = node_page_state(nid, NR_SHMEM);
  2717. val->freeram = node_page_state(nid, NR_FREE_PAGES);
  2718. #ifdef CONFIG_HIGHMEM
  2719. val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
  2720. val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
  2721. NR_FREE_PAGES);
  2722. #else
  2723. val->totalhigh = 0;
  2724. val->freehigh = 0;
  2725. #endif
  2726. val->mem_unit = PAGE_SIZE;
  2727. }
  2728. #endif
  2729. /*
  2730. * Determine whether the node should be displayed or not, depending on whether
  2731. * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
  2732. */
  2733. bool skip_free_areas_node(unsigned int flags, int nid)
  2734. {
  2735. bool ret = false;
  2736. unsigned int cpuset_mems_cookie;
  2737. if (!(flags & SHOW_MEM_FILTER_NODES))
  2738. goto out;
  2739. do {
  2740. cpuset_mems_cookie = read_mems_allowed_begin();
  2741. ret = !node_isset(nid, cpuset_current_mems_allowed);
  2742. } while (read_mems_allowed_retry(cpuset_mems_cookie));
  2743. out:
  2744. return ret;
  2745. }
  2746. #define K(x) ((x) << (PAGE_SHIFT-10))
  2747. static void show_migration_types(unsigned char type)
  2748. {
  2749. static const char types[MIGRATE_TYPES] = {
  2750. [MIGRATE_UNMOVABLE] = 'U',
  2751. [MIGRATE_RECLAIMABLE] = 'E',
  2752. [MIGRATE_MOVABLE] = 'M',
  2753. [MIGRATE_RESERVE] = 'R',
  2754. #ifdef CONFIG_CMA
  2755. [MIGRATE_CMA] = 'C',
  2756. #endif
  2757. #ifdef CONFIG_MEMORY_ISOLATION
  2758. [MIGRATE_ISOLATE] = 'I',
  2759. #endif
  2760. };
  2761. char tmp[MIGRATE_TYPES + 1];
  2762. char *p = tmp;
  2763. int i;
  2764. for (i = 0; i < MIGRATE_TYPES; i++) {
  2765. if (type & (1 << i))
  2766. *p++ = types[i];
  2767. }
  2768. *p = '\0';
  2769. printk("(%s) ", tmp);
  2770. }
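/*
 * Worked example (illustrative): if a free_area holds pages on both its
 * MIGRATE_UNMOVABLE and MIGRATE_MOVABLE lists, the caller passes the bitmap
 * (1 << MIGRATE_UNMOVABLE) | (1 << MIGRATE_MOVABLE) and the function above
 * prints "(UM) ".
 */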
  2771. /*
  2772. * Show free area list (used inside shift_scroll-lock stuff)
  2773. * We also calculate the percentage fragmentation. We do this by counting the
  2774. * memory on each free list with the exception of the first item on the list.
  2775. * Suppresses nodes that are not allowed by current's cpuset if
  2776. * SHOW_MEM_FILTER_NODES is passed.
  2777. */
  2778. void show_free_areas(unsigned int filter)
  2779. {
  2780. int cpu;
  2781. struct zone *zone;
  2782. for_each_populated_zone(zone) {
  2783. if (skip_free_areas_node(filter, zone_to_nid(zone)))
  2784. continue;
  2785. show_node(zone);
  2786. printk("%s per-cpu:\n", zone->name);
  2787. for_each_online_cpu(cpu) {
  2788. struct per_cpu_pageset *pageset;
  2789. pageset = per_cpu_ptr(zone->pageset, cpu);
  2790. printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
  2791. cpu, pageset->pcp.high,
  2792. pageset->pcp.batch, pageset->pcp.count);
  2793. }
  2794. }
  2795. printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
  2796. " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
  2797. " unevictable:%lu"
  2798. " dirty:%lu writeback:%lu unstable:%lu\n"
  2799. " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
  2800. " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
  2801. " free_cma:%lu\n",
  2802. global_page_state(NR_ACTIVE_ANON),
  2803. global_page_state(NR_INACTIVE_ANON),
  2804. global_page_state(NR_ISOLATED_ANON),
  2805. global_page_state(NR_ACTIVE_FILE),
  2806. global_page_state(NR_INACTIVE_FILE),
  2807. global_page_state(NR_ISOLATED_FILE),
  2808. global_page_state(NR_UNEVICTABLE),
  2809. global_page_state(NR_FILE_DIRTY),
  2810. global_page_state(NR_WRITEBACK),
  2811. global_page_state(NR_UNSTABLE_NFS),
  2812. global_page_state(NR_FREE_PAGES),
  2813. global_page_state(NR_SLAB_RECLAIMABLE),
  2814. global_page_state(NR_SLAB_UNRECLAIMABLE),
  2815. global_page_state(NR_FILE_MAPPED),
  2816. global_page_state(NR_SHMEM),
  2817. global_page_state(NR_PAGETABLE),
  2818. global_page_state(NR_BOUNCE),
  2819. global_page_state(NR_FREE_CMA_PAGES));
  2820. for_each_populated_zone(zone) {
  2821. int i;
  2822. if (skip_free_areas_node(filter, zone_to_nid(zone)))
  2823. continue;
  2824. show_node(zone);
  2825. printk("%s"
  2826. " free:%lukB"
  2827. " min:%lukB"
  2828. " low:%lukB"
  2829. " high:%lukB"
  2830. " active_anon:%lukB"
  2831. " inactive_anon:%lukB"
  2832. " active_file:%lukB"
  2833. " inactive_file:%lukB"
  2834. " unevictable:%lukB"
  2835. " isolated(anon):%lukB"
  2836. " isolated(file):%lukB"
  2837. " present:%lukB"
  2838. " managed:%lukB"
  2839. " mlocked:%lukB"
  2840. " dirty:%lukB"
  2841. " writeback:%lukB"
  2842. " mapped:%lukB"
  2843. " shmem:%lukB"
  2844. " slab_reclaimable:%lukB"
  2845. " slab_unreclaimable:%lukB"
  2846. " kernel_stack:%lukB"
  2847. " pagetables:%lukB"
  2848. " unstable:%lukB"
  2849. " bounce:%lukB"
  2850. " free_cma:%lukB"
  2851. " writeback_tmp:%lukB"
  2852. " pages_scanned:%lu"
  2853. " all_unreclaimable? %s"
  2854. "\n",
  2855. zone->name,
  2856. K(zone_page_state(zone, NR_FREE_PAGES)),
  2857. K(min_wmark_pages(zone)),
  2858. K(low_wmark_pages(zone)),
  2859. K(high_wmark_pages(zone)),
  2860. K(zone_page_state(zone, NR_ACTIVE_ANON)),
  2861. K(zone_page_state(zone, NR_INACTIVE_ANON)),
  2862. K(zone_page_state(zone, NR_ACTIVE_FILE)),
  2863. K(zone_page_state(zone, NR_INACTIVE_FILE)),
  2864. K(zone_page_state(zone, NR_UNEVICTABLE)),
  2865. K(zone_page_state(zone, NR_ISOLATED_ANON)),
  2866. K(zone_page_state(zone, NR_ISOLATED_FILE)),
  2867. K(zone->present_pages),
  2868. K(zone->managed_pages),
  2869. K(zone_page_state(zone, NR_MLOCK)),
  2870. K(zone_page_state(zone, NR_FILE_DIRTY)),
  2871. K(zone_page_state(zone, NR_WRITEBACK)),
  2872. K(zone_page_state(zone, NR_FILE_MAPPED)),
  2873. K(zone_page_state(zone, NR_SHMEM)),
  2874. K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
  2875. K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
  2876. zone_page_state(zone, NR_KERNEL_STACK) *
  2877. THREAD_SIZE / 1024,
  2878. K(zone_page_state(zone, NR_PAGETABLE)),
  2879. K(zone_page_state(zone, NR_UNSTABLE_NFS)),
  2880. K(zone_page_state(zone, NR_BOUNCE)),
  2881. K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
  2882. K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
  2883. K(zone_page_state(zone, NR_PAGES_SCANNED)),
  2884. (!zone_reclaimable(zone) ? "yes" : "no")
  2885. );
  2886. printk("lowmem_reserve[]:");
  2887. for (i = 0; i < MAX_NR_ZONES; i++)
  2888. printk(" %ld", zone->lowmem_reserve[i]);
  2889. printk("\n");
  2890. }
  2891. for_each_populated_zone(zone) {
  2892. unsigned long nr[MAX_ORDER], flags, order, total = 0;
  2893. unsigned char types[MAX_ORDER];
  2894. if (skip_free_areas_node(filter, zone_to_nid(zone)))
  2895. continue;
  2896. show_node(zone);
  2897. printk("%s: ", zone->name);
  2898. spin_lock_irqsave(&zone->lock, flags);
  2899. for (order = 0; order < MAX_ORDER; order++) {
  2900. struct free_area *area = &zone->free_area[order];
  2901. int type;
  2902. nr[order] = area->nr_free;
  2903. total += nr[order] << order;
  2904. types[order] = 0;
  2905. for (type = 0; type < MIGRATE_TYPES; type++) {
  2906. if (!list_empty(&area->free_list[type]))
  2907. types[order] |= 1 << type;
  2908. }
  2909. }
  2910. spin_unlock_irqrestore(&zone->lock, flags);
  2911. for (order = 0; order < MAX_ORDER; order++) {
  2912. printk("%lu*%lukB ", nr[order], K(1UL) << order);
  2913. if (nr[order])
  2914. show_migration_types(types[order]);
  2915. }
  2916. printk("= %lukB\n", K(total));
  2917. }
  2918. hugetlb_show_meminfo();
  2919. printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
  2920. show_swap_cache_info();
  2921. }
  2922. static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
  2923. {
  2924. zoneref->zone = zone;
  2925. zoneref->zone_idx = zone_idx(zone);
  2926. }
  2927. /*
  2928. * Builds allocation fallback zone lists.
  2929. *
  2930. * Add all populated zones of a node to the zonelist.
  2931. */
  2932. static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
  2933. int nr_zones)
  2934. {
  2935. struct zone *zone;
  2936. enum zone_type zone_type = MAX_NR_ZONES;
  2937. do {
  2938. zone_type--;
  2939. zone = pgdat->node_zones + zone_type;
  2940. if (populated_zone(zone)) {
  2941. zoneref_set_zone(zone,
  2942. &zonelist->_zonerefs[nr_zones++]);
  2943. check_highest_zone(zone_type);
  2944. }
  2945. } while (zone_type);
  2946. return nr_zones;
  2947. }
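/*
 * Worked example (illustrative): for a node with populated DMA and Normal
 * zones, the descending zone_type loop above appends Normal first and DMA
 * last, so allocations prefer the higher zone and only fall back to DMA.
 */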
  2948. /*
  2949. * zonelist_order:
  2950. * 0 = automatic detection of better ordering.
  2951. * 1 = order by ([node] distance, -zonetype)
  2952. * 2 = order by (-zonetype, [node] distance)
  2953. *
  2954. * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
  2955. * the same zonelist. So only NUMA can configure this param.
  2956. */
  2957. #define ZONELIST_ORDER_DEFAULT 0
  2958. #define ZONELIST_ORDER_NODE 1
  2959. #define ZONELIST_ORDER_ZONE 2
  2960. /* zonelist order in the kernel.
  2961. * set_zonelist_order() will set this to NODE or ZONE.
  2962. */
  2963. static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
  2964. static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
  2965. #ifdef CONFIG_NUMA
2966. /* The value the user specified, possibly changed later via sysctl */
  2967. static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
  2968. /* string for sysctl */
  2969. #define NUMA_ZONELIST_ORDER_LEN 16
  2970. char numa_zonelist_order[16] = "default";
  2971. /*
2972. * interface to configure zonelist ordering.
2973. * command line option "numa_zonelist_order"
2974. * = "[dD]efault" - default, automatic configuration.
2975. * = "[nN]ode" - order by node locality, then by zone within node
2976. * = "[zZ]one" - order by zone, then by locality within zone
  2977. */
  2978. static int __parse_numa_zonelist_order(char *s)
  2979. {
  2980. if (*s == 'd' || *s == 'D') {
  2981. user_zonelist_order = ZONELIST_ORDER_DEFAULT;
  2982. } else if (*s == 'n' || *s == 'N') {
  2983. user_zonelist_order = ZONELIST_ORDER_NODE;
  2984. } else if (*s == 'z' || *s == 'Z') {
  2985. user_zonelist_order = ZONELIST_ORDER_ZONE;
  2986. } else {
  2987. printk(KERN_WARNING
  2988. "Ignoring invalid numa_zonelist_order value: "
  2989. "%s\n", s);
  2990. return -EINVAL;
  2991. }
  2992. return 0;
  2993. }
  2994. static __init int setup_numa_zonelist_order(char *s)
  2995. {
  2996. int ret;
  2997. if (!s)
  2998. return 0;
  2999. ret = __parse_numa_zonelist_order(s);
  3000. if (ret == 0)
  3001. strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
  3002. return ret;
  3003. }
  3004. early_param("numa_zonelist_order", setup_numa_zonelist_order);
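/*
 * Example (illustrative): booting with "numa_zonelist_order=zone" makes
 * setup_numa_zonelist_order() call __parse_numa_zonelist_order("zone"),
 * which sets user_zonelist_order to ZONELIST_ORDER_ZONE before the
 * zonelists are first built.
 */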
  3005. /*
  3006. * sysctl handler for numa_zonelist_order
  3007. */
  3008. int numa_zonelist_order_handler(struct ctl_table *table, int write,
  3009. void __user *buffer, size_t *length,
  3010. loff_t *ppos)
  3011. {
  3012. char saved_string[NUMA_ZONELIST_ORDER_LEN];
  3013. int ret;
  3014. static DEFINE_MUTEX(zl_order_mutex);
  3015. mutex_lock(&zl_order_mutex);
  3016. if (write) {
  3017. if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
  3018. ret = -EINVAL;
  3019. goto out;
  3020. }
  3021. strcpy(saved_string, (char *)table->data);
  3022. }
  3023. ret = proc_dostring(table, write, buffer, length, ppos);
  3024. if (ret)
  3025. goto out;
  3026. if (write) {
  3027. int oldval = user_zonelist_order;
  3028. ret = __parse_numa_zonelist_order((char *)table->data);
  3029. if (ret) {
  3030. /*
  3031. * bogus value. restore saved string
  3032. */
  3033. strncpy((char *)table->data, saved_string,
  3034. NUMA_ZONELIST_ORDER_LEN);
  3035. user_zonelist_order = oldval;
  3036. } else if (oldval != user_zonelist_order) {
  3037. mutex_lock(&zonelists_mutex);
  3038. build_all_zonelists(NULL, NULL);
  3039. mutex_unlock(&zonelists_mutex);
  3040. }
  3041. }
  3042. out:
  3043. mutex_unlock(&zl_order_mutex);
  3044. return ret;
  3045. }
  3046. #define MAX_NODE_LOAD (nr_online_nodes)
  3047. static int node_load[MAX_NUMNODES];
  3048. /**
  3049. * find_next_best_node - find the next node that should appear in a given node's fallback list
  3050. * @node: node whose fallback list we're appending
  3051. * @used_node_mask: nodemask_t of already used nodes
  3052. *
  3053. * We use a number of factors to determine which is the next node that should
  3054. * appear on a given node's fallback list. The node should not have appeared
  3055. * already in @node's fallback list, and it should be the next closest node
  3056. * according to the distance array (which contains arbitrary distance values
  3057. * from each node to each node in the system), and should also prefer nodes
  3058. * with no CPUs, since presumably they'll have very little allocation pressure
  3059. * on them otherwise.
  3060. * It returns -1 if no node is found.
  3061. */
  3062. static int find_next_best_node(int node, nodemask_t *used_node_mask)
  3063. {
  3064. int n, val;
  3065. int min_val = INT_MAX;
  3066. int best_node = NUMA_NO_NODE;
  3067. const struct cpumask *tmp = cpumask_of_node(0);
  3068. /* Use the local node if we haven't already */
  3069. if (!node_isset(node, *used_node_mask)) {
  3070. node_set(node, *used_node_mask);
  3071. return node;
  3072. }
  3073. for_each_node_state(n, N_MEMORY) {
  3074. /* Don't want a node to appear more than once */
  3075. if (node_isset(n, *used_node_mask))
  3076. continue;
  3077. /* Use the distance array to find the distance */
  3078. val = node_distance(node, n);
  3079. /* Penalize nodes under us ("prefer the next node") */
  3080. val += (n < node);
  3081. /* Give preference to headless and unused nodes */
  3082. tmp = cpumask_of_node(n);
  3083. if (!cpumask_empty(tmp))
  3084. val += PENALTY_FOR_NODE_WITH_CPUS;
  3085. /* Slight preference for less loaded node */
  3086. val *= (MAX_NODE_LOAD*MAX_NUMNODES);
  3087. val += node_load[n];
  3088. if (val < min_val) {
  3089. min_val = val;
  3090. best_node = n;
  3091. }
  3092. }
  3093. if (best_node >= 0)
  3094. node_set(best_node, *used_node_mask);
  3095. return best_node;
  3096. }
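/*
 * Worked example (illustrative, assuming nodes 0 and 1 both have memory and
 * CPUs, with node_distance(0, 1) == 20): when extending node 0's fallback
 * list, node 1 scores
 *	val = (20 + 0 + PENALTY_FOR_NODE_WITH_CPUS)
 *		* (MAX_NODE_LOAD * MAX_NUMNODES) + node_load[1];
 * the candidate with the lowest score becomes best_node and is set in
 * *used_node_mask.
 */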
  3097. /*
  3098. * Build zonelists ordered by node and zones within node.
  3099. * This results in maximum locality--normal zone overflows into local
  3100. * DMA zone, if any--but risks exhausting DMA zone.
  3101. */
  3102. static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
  3103. {
  3104. int j;
  3105. struct zonelist *zonelist;
  3106. zonelist = &pgdat->node_zonelists[0];
  3107. for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
  3108. ;
  3109. j = build_zonelists_node(NODE_DATA(node), zonelist, j);
  3110. zonelist->_zonerefs[j].zone = NULL;
  3111. zonelist->_zonerefs[j].zone_idx = 0;
  3112. }
  3113. /*
  3114. * Build gfp_thisnode zonelists
  3115. */
  3116. static void build_thisnode_zonelists(pg_data_t *pgdat)
  3117. {
  3118. int j;
  3119. struct zonelist *zonelist;
  3120. zonelist = &pgdat->node_zonelists[1];
  3121. j = build_zonelists_node(pgdat, zonelist, 0);
  3122. zonelist->_zonerefs[j].zone = NULL;
  3123. zonelist->_zonerefs[j].zone_idx = 0;
  3124. }
  3125. /*
  3126. * Build zonelists ordered by zone and nodes within zones.
  3127. * This results in conserving DMA zone[s] until all Normal memory is
3128. * exhausted, but results in overflowing to a remote node while memory
3129. * may still exist in the local DMA zone.
  3130. */
  3131. static int node_order[MAX_NUMNODES];
  3132. static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
  3133. {
  3134. int pos, j, node;
  3135. int zone_type; /* needs to be signed */
  3136. struct zone *z;
  3137. struct zonelist *zonelist;
  3138. zonelist = &pgdat->node_zonelists[0];
  3139. pos = 0;
  3140. for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
  3141. for (j = 0; j < nr_nodes; j++) {
  3142. node = node_order[j];
  3143. z = &NODE_DATA(node)->node_zones[zone_type];
  3144. if (populated_zone(z)) {
  3145. zoneref_set_zone(z,
  3146. &zonelist->_zonerefs[pos++]);
  3147. check_highest_zone(zone_type);
  3148. }
  3149. }
  3150. }
  3151. zonelist->_zonerefs[pos].zone = NULL;
  3152. zonelist->_zonerefs[pos].zone_idx = 0;
  3153. }
  3154. #if defined(CONFIG_64BIT)
  3155. /*
  3156. * Devices that require DMA32/DMA are relatively rare and do not justify a
  3157. * penalty to every machine in case the specialised case applies. Default
  3158. * to Node-ordering on 64-bit NUMA machines
  3159. */
  3160. static int default_zonelist_order(void)
  3161. {
  3162. return ZONELIST_ORDER_NODE;
  3163. }
  3164. #else
  3165. /*
  3166. * On 32-bit, the Normal zone needs to be preserved for allocations accessible
  3167. * by the kernel. If processes running on node 0 deplete the low memory zone
3168. * then reclaim will occur more frequently, increasing stalls and potentially
3169. * making OOM more likely if a large percentage of the zone is under writeback or
  3170. * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
  3171. * Hence, default to zone ordering on 32-bit.
  3172. */
  3173. static int default_zonelist_order(void)
  3174. {
  3175. return ZONELIST_ORDER_ZONE;
  3176. }
  3177. #endif /* CONFIG_64BIT */
  3178. static void set_zonelist_order(void)
  3179. {
  3180. if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
  3181. current_zonelist_order = default_zonelist_order();
  3182. else
  3183. current_zonelist_order = user_zonelist_order;
  3184. }
  3185. static void build_zonelists(pg_data_t *pgdat)
  3186. {
  3187. int j, node, load;
  3188. enum zone_type i;
  3189. nodemask_t used_mask;
  3190. int local_node, prev_node;
  3191. struct zonelist *zonelist;
  3192. int order = current_zonelist_order;
  3193. /* initialize zonelists */
  3194. for (i = 0; i < MAX_ZONELISTS; i++) {
  3195. zonelist = pgdat->node_zonelists + i;
  3196. zonelist->_zonerefs[0].zone = NULL;
  3197. zonelist->_zonerefs[0].zone_idx = 0;
  3198. }
  3199. /* NUMA-aware ordering of nodes */
  3200. local_node = pgdat->node_id;
  3201. load = nr_online_nodes;
  3202. prev_node = local_node;
  3203. nodes_clear(used_mask);
  3204. memset(node_order, 0, sizeof(node_order));
  3205. j = 0;
  3206. while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
  3207. /*
  3208. * We don't want to pressure a particular node.
3209. * So we add a penalty to the first node in the same
3210. * distance group to make the selection round-robin.
  3211. */
  3212. if (node_distance(local_node, node) !=
  3213. node_distance(local_node, prev_node))
  3214. node_load[node] = load;
  3215. prev_node = node;
  3216. load--;
  3217. if (order == ZONELIST_ORDER_NODE)
  3218. build_zonelists_in_node_order(pgdat, node);
  3219. else
  3220. node_order[j++] = node; /* remember order */
  3221. }
  3222. if (order == ZONELIST_ORDER_ZONE) {
  3223. /* calculate node order -- i.e., DMA last! */
  3224. build_zonelists_in_zone_order(pgdat, j);
  3225. }
  3226. build_thisnode_zonelists(pgdat);
  3227. }
  3228. /* Construct the zonelist performance cache - see further mmzone.h */
  3229. static void build_zonelist_cache(pg_data_t *pgdat)
  3230. {
  3231. struct zonelist *zonelist;
  3232. struct zonelist_cache *zlc;
  3233. struct zoneref *z;
  3234. zonelist = &pgdat->node_zonelists[0];
  3235. zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
  3236. bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
  3237. for (z = zonelist->_zonerefs; z->zone; z++)
  3238. zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
  3239. }
  3240. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  3241. /*
  3242. * Return node id of node used for "local" allocations.
  3243. * I.e., first node id of first zone in arg node's generic zonelist.
  3244. * Used for initializing percpu 'numa_mem', which is used primarily
  3245. * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
  3246. */
  3247. int local_memory_node(int node)
  3248. {
  3249. struct zone *zone;
  3250. (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
  3251. gfp_zone(GFP_KERNEL),
  3252. NULL,
  3253. &zone);
  3254. return zone->node;
  3255. }
  3256. #endif
  3257. #else /* CONFIG_NUMA */
  3258. static void set_zonelist_order(void)
  3259. {
  3260. current_zonelist_order = ZONELIST_ORDER_ZONE;
  3261. }
  3262. static void build_zonelists(pg_data_t *pgdat)
  3263. {
  3264. int node, local_node;
  3265. enum zone_type j;
  3266. struct zonelist *zonelist;
  3267. local_node = pgdat->node_id;
  3268. zonelist = &pgdat->node_zonelists[0];
  3269. j = build_zonelists_node(pgdat, zonelist, 0);
  3270. /*
  3271. * Now we build the zonelist so that it contains the zones
  3272. * of all the other nodes.
  3273. * We don't want to pressure a particular node, so when
  3274. * building the zones for node N, we make sure that the
  3275. * zones coming right after the local ones are those from
  3276. * node N+1 (modulo N)
  3277. */
  3278. for (node = local_node + 1; node < MAX_NUMNODES; node++) {
  3279. if (!node_online(node))
  3280. continue;
  3281. j = build_zonelists_node(NODE_DATA(node), zonelist, j);
  3282. }
  3283. for (node = 0; node < local_node; node++) {
  3284. if (!node_online(node))
  3285. continue;
  3286. j = build_zonelists_node(NODE_DATA(node), zonelist, j);
  3287. }
  3288. zonelist->_zonerefs[j].zone = NULL;
  3289. zonelist->_zonerefs[j].zone_idx = 0;
  3290. }
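/*
 * Worked example (illustrative): with four online nodes and local_node == 2,
 * the loops above append zones in node order 2, 3, 0, 1, i.e. node N+1
 * wrapping around, so remote fallback starts at the nearest higher-numbered
 * node.
 */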
  3291. /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
  3292. static void build_zonelist_cache(pg_data_t *pgdat)
  3293. {
  3294. pgdat->node_zonelists[0].zlcache_ptr = NULL;
  3295. }
  3296. #endif /* CONFIG_NUMA */
  3297. /*
  3298. * Boot pageset table. One per cpu which is going to be used for all
  3299. * zones and all nodes. The parameters will be set in such a way
  3300. * that an item put on a list will immediately be handed over to
  3301. * the buddy list. This is safe since pageset manipulation is done
  3302. * with interrupts disabled.
  3303. *
  3304. * The boot_pagesets must be kept even after bootup is complete for
  3305. * unused processors and/or zones. They do play a role for bootstrapping
  3306. * hotplugged processors.
  3307. *
  3308. * zoneinfo_show() and maybe other functions do
  3309. * not check if the processor is online before following the pageset pointer.
  3310. * Other parts of the kernel may not check if the zone is available.
  3311. */
  3312. static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
  3313. static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
  3314. static void setup_zone_pageset(struct zone *zone);
  3315. /*
  3316. * Global mutex to protect against size modification of zonelists
  3317. * as well as to serialize pageset setup for the new populated zone.
  3318. */
  3319. DEFINE_MUTEX(zonelists_mutex);
3320. /* Return type is int only to satisfy stop_machine()'s callback prototype */
  3321. static int __build_all_zonelists(void *data)
  3322. {
  3323. int nid;
  3324. int cpu;
  3325. pg_data_t *self = data;
  3326. #ifdef CONFIG_NUMA
  3327. memset(node_load, 0, sizeof(node_load));
  3328. #endif
  3329. if (self && !node_online(self->node_id)) {
  3330. build_zonelists(self);
  3331. build_zonelist_cache(self);
  3332. }
  3333. for_each_online_node(nid) {
  3334. pg_data_t *pgdat = NODE_DATA(nid);
  3335. build_zonelists(pgdat);
  3336. build_zonelist_cache(pgdat);
  3337. }
  3338. /*
  3339. * Initialize the boot_pagesets that are going to be used
  3340. * for bootstrapping processors. The real pagesets for
  3341. * each zone will be allocated later when the per cpu
  3342. * allocator is available.
  3343. *
  3344. * boot_pagesets are used also for bootstrapping offline
  3345. * cpus if the system is already booted because the pagesets
  3346. * are needed to initialize allocators on a specific cpu too.
  3347. * F.e. the percpu allocator needs the page allocator which
  3348. * needs the percpu allocator in order to allocate its pagesets
  3349. * (a chicken-egg dilemma).
  3350. */
  3351. for_each_possible_cpu(cpu) {
  3352. setup_pageset(&per_cpu(boot_pageset, cpu), 0);
  3353. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  3354. /*
  3355. * We now know the "local memory node" for each node--
  3356. * i.e., the node of the first zone in the generic zonelist.
  3357. * Set up numa_mem percpu variable for on-line cpus. During
  3358. * boot, only the boot cpu should be on-line; we'll init the
  3359. * secondary cpus' numa_mem as they come on-line. During
  3360. * node/memory hotplug, we'll fixup all on-line cpus.
  3361. */
  3362. if (cpu_online(cpu))
  3363. set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
  3364. #endif
  3365. }
  3366. return 0;
  3367. }
  3368. /*
  3369. * Called with zonelists_mutex held always
  3370. * unless system_state == SYSTEM_BOOTING.
  3371. */
  3372. void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
  3373. {
  3374. set_zonelist_order();
  3375. if (system_state == SYSTEM_BOOTING) {
  3376. __build_all_zonelists(NULL);
  3377. mminit_verify_zonelist();
  3378. cpuset_init_current_mems_allowed();
  3379. } else {
  3380. #ifdef CONFIG_MEMORY_HOTPLUG
  3381. if (zone)
  3382. setup_zone_pageset(zone);
  3383. #endif
  3384. /* we have to stop all cpus to guarantee there is no user
  3385. of zonelist */
  3386. stop_machine(__build_all_zonelists, pgdat, NULL);
  3387. /* cpuset refresh routine should be here */
  3388. }
  3389. vm_total_pages = nr_free_pagecache_pages();
  3390. /*
  3391. * Disable grouping by mobility if the number of pages in the
  3392. * system is too low to allow the mechanism to work. It would be
  3393. * more accurate, but expensive to check per-zone. This check is
  3394. * made on memory-hotadd so a system can start with mobility
  3395. * disabled and enable it later
  3396. */
  3397. if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
  3398. page_group_by_mobility_disabled = 1;
  3399. else
  3400. page_group_by_mobility_disabled = 0;
  3401. pr_info("Built %i zonelists in %s order, mobility grouping %s. "
  3402. "Total pages: %ld\n",
  3403. nr_online_nodes,
  3404. zonelist_order_name[current_zonelist_order],
  3405. page_group_by_mobility_disabled ? "off" : "on",
  3406. vm_total_pages);
  3407. #ifdef CONFIG_NUMA
  3408. pr_info("Policy zone: %s\n", zone_names[policy_zone]);
  3409. #endif
  3410. }
  3411. /*
  3412. * Helper functions to size the waitqueue hash table.
  3413. * Essentially these want to choose hash table sizes sufficiently
  3414. * large so that collisions trying to wait on pages are rare.
  3415. * But in fact, the number of active page waitqueues on typical
  3416. * systems is ridiculously low, less than 200. So this is even
  3417. * conservative, even though it seems large.
  3418. *
  3419. * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
  3420. * waitqueues, i.e. the size of the waitq table given the number of pages.
  3421. */
  3422. #define PAGES_PER_WAITQUEUE 256
  3423. #ifndef CONFIG_MEMORY_HOTPLUG
  3424. static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
  3425. {
  3426. unsigned long size = 1;
  3427. pages /= PAGES_PER_WAITQUEUE;
  3428. while (size < pages)
  3429. size <<= 1;
  3430. /*
  3431. * Once we have dozens or even hundreds of threads sleeping
  3432. * on IO we've got bigger problems than wait queue collision.
  3433. * Limit the size of the wait table to a reasonable size.
  3434. */
  3435. size = min(size, 4096UL);
  3436. return max(size, 4UL);
  3437. }
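/*
 * Worked example (illustrative): a zone of 1,048,576 pages (4 GB with 4 KB
 * pages) gives pages / PAGES_PER_WAITQUEUE == 4096, so the doubling loop
 * stops at 4096 and the min() clamp leaves the table at its 4096-entry
 * ceiling; a tiny 512-page zone yields 2, which max() raises to the
 * 4-entry floor.
 */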
  3438. #else
  3439. /*
  3440. * A zone's size might be changed by hot-add, so it is not possible to determine
  3441. * a suitable size for its wait_table. So we use the maximum size now.
  3442. *
  3443. * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
  3444. *
  3445. * i386 (preemption config) : 4096 x 16 = 64Kbyte.
  3446. * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
  3447. * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
  3448. *
  3449. * The maximum entries are prepared when a zone's memory is (512K + 256) pages
  3450. * or more by the traditional way. (See above). It equals:
  3451. *
  3452. * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
  3453. * ia64(16K page size) : = ( 8G + 4M)byte.
  3454. * powerpc (64K page size) : = (32G +16M)byte.
  3455. */
  3456. static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
  3457. {
  3458. return 4096UL;
  3459. }
  3460. #endif
  3461. /*
  3462. * This is an integer logarithm so that shifts can be used later
  3463. * to extract the more random high bits from the multiplicative
  3464. * hash function before the remainder is taken.
  3465. */
  3466. static inline unsigned long wait_table_bits(unsigned long size)
  3467. {
  3468. return ffz(~size);
  3469. }
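/*
 * Example (illustrative): the table sizes produced above are powers of two,
 * so wait_table_bits(4096) == 12, because ffz(~4096) finds the first zero
 * bit of a value whose only clear bit is bit 12.
 */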
  3470. /*
  3471. * Check if a pageblock contains reserved pages
  3472. */
  3473. static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
  3474. {
  3475. unsigned long pfn;
  3476. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  3477. if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
  3478. return 1;
  3479. }
  3480. return 0;
  3481. }
  3482. /*
  3483. * Mark a number of pageblocks as MIGRATE_RESERVE. The number
  3484. * of blocks reserved is based on min_wmark_pages(zone). The memory within
  3485. * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
  3486. * higher will lead to a bigger reserve which will get freed as contiguous
  3487. * blocks as reclaim kicks in
  3488. */
  3489. static void setup_zone_migrate_reserve(struct zone *zone)
  3490. {
  3491. unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
  3492. struct page *page;
  3493. unsigned long block_migratetype;
  3494. int reserve;
  3495. int old_reserve;
  3496. /*
  3497. * Get the start pfn, end pfn and the number of blocks to reserve
  3498. * We have to be careful to be aligned to pageblock_nr_pages to
  3499. * make sure that we always check pfn_valid for the first page in
  3500. * the block.
  3501. */
  3502. start_pfn = zone->zone_start_pfn;
  3503. end_pfn = zone_end_pfn(zone);
  3504. start_pfn = roundup(start_pfn, pageblock_nr_pages);
  3505. reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
  3506. pageblock_order;
  3507. /*
  3508. * Reserve blocks are generally in place to help high-order atomic
  3509. * allocations that are short-lived. A min_free_kbytes value that
  3510. * would result in more than 2 reserve blocks for atomic allocations
  3511. * is assumed to be in place to help anti-fragmentation for the
  3512. * future allocation of hugepages at runtime.
  3513. */
  3514. reserve = min(2, reserve);
  3515. old_reserve = zone->nr_migrate_reserve_block;
3516. /* When memory is hot-added, we almost always need to do nothing */
  3517. if (reserve == old_reserve)
  3518. return;
  3519. zone->nr_migrate_reserve_block = reserve;
  3520. for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  3521. if (!pfn_valid(pfn))
  3522. continue;
  3523. page = pfn_to_page(pfn);
  3524. /* Watch out for overlapping nodes */
  3525. if (page_to_nid(page) != zone_to_nid(zone))
  3526. continue;
  3527. block_migratetype = get_pageblock_migratetype(page);
  3528. /* Only test what is necessary when the reserves are not met */
  3529. if (reserve > 0) {
  3530. /*
  3531. * Blocks with reserved pages will never free, skip
  3532. * them.
  3533. */
  3534. block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
  3535. if (pageblock_is_reserved(pfn, block_end_pfn))
  3536. continue;
  3537. /* If this block is reserved, account for it */
  3538. if (block_migratetype == MIGRATE_RESERVE) {
  3539. reserve--;
  3540. continue;
  3541. }
  3542. /* Suitable for reserving if this block is movable */
  3543. if (block_migratetype == MIGRATE_MOVABLE) {
  3544. set_pageblock_migratetype(page,
  3545. MIGRATE_RESERVE);
  3546. move_freepages_block(zone, page,
  3547. MIGRATE_RESERVE);
  3548. reserve--;
  3549. continue;
  3550. }
  3551. } else if (!old_reserve) {
  3552. /*
  3553. * At boot time we don't need to scan the whole zone
  3554. * for turning off MIGRATE_RESERVE.
  3555. */
  3556. break;
  3557. }
  3558. /*
  3559. * If the reserve is met and this is a previous reserved block,
  3560. * take it back
  3561. */
  3562. if (block_migratetype == MIGRATE_RESERVE) {
  3563. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  3564. move_freepages_block(zone, page, MIGRATE_MOVABLE);
  3565. }
  3566. }
  3567. }
  3568. /*
  3569. * Initially all pages are reserved - free ones are freed
  3570. * up by free_all_bootmem() once the early boot process is
  3571. * done. Non-atomic initialization, single-pass.
  3572. */
  3573. void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
  3574. unsigned long start_pfn, enum memmap_context context)
  3575. {
  3576. struct page *page;
  3577. unsigned long end_pfn = start_pfn + size;
  3578. unsigned long pfn;
  3579. struct zone *z;
  3580. if (highest_memmap_pfn < end_pfn - 1)
  3581. highest_memmap_pfn = end_pfn - 1;
  3582. z = &NODE_DATA(nid)->node_zones[zone];
  3583. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  3584. /*
  3585. * There can be holes in boot-time mem_map[]s
  3586. * handed to this function. They do not
  3587. * exist on hotplugged memory.
  3588. */
  3589. if (context == MEMMAP_EARLY) {
  3590. if (!early_pfn_valid(pfn))
  3591. continue;
  3592. if (!early_pfn_in_nid(pfn, nid))
  3593. continue;
  3594. }
  3595. page = pfn_to_page(pfn);
  3596. set_page_links(page, zone, nid, pfn);
  3597. mminit_verify_page_links(page, zone, nid, pfn);
  3598. init_page_count(page);
  3599. page_mapcount_reset(page);
  3600. page_cpupid_reset_last(page);
  3601. SetPageReserved(page);
  3602. /*
  3603. * Mark the block movable so that blocks are reserved for
  3604. * movable at startup. This will force kernel allocations
  3605. * to reserve their blocks rather than leaking throughout
  3606. * the address space during boot when many long-lived
  3607. * kernel allocations are made. Later some blocks near
  3608. * the start are marked MIGRATE_RESERVE by
  3609. * setup_zone_migrate_reserve()
  3610. *
3611. * The bitmap is created for the zone's valid pfn range, but the
3612. * memmap can be created for invalid pages (for alignment), so
3613. * check here to avoid calling set_pageblock_migratetype() on a
3614. * pfn that lies outside the zone.
  3615. */
  3616. if ((z->zone_start_pfn <= pfn)
  3617. && (pfn < zone_end_pfn(z))
  3618. && !(pfn & (pageblock_nr_pages - 1)))
  3619. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  3620. INIT_LIST_HEAD(&page->lru);
  3621. #ifdef WANT_PAGE_VIRTUAL
  3622. /* The shift won't overflow because ZONE_NORMAL is below 4G. */
  3623. if (!is_highmem_idx(zone))
  3624. set_page_address(page, __va(pfn << PAGE_SHIFT));
  3625. #endif
  3626. }
  3627. }
  3628. static void __meminit zone_init_free_lists(struct zone *zone)
  3629. {
  3630. unsigned int order, t;
  3631. for_each_migratetype_order(order, t) {
  3632. INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
  3633. zone->free_area[order].nr_free = 0;
  3634. }
  3635. }
  3636. #ifndef __HAVE_ARCH_MEMMAP_INIT
  3637. #define memmap_init(size, nid, zone, start_pfn) \
  3638. memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
  3639. #endif
  3640. static int zone_batchsize(struct zone *zone)
  3641. {
  3642. #ifdef CONFIG_MMU
  3643. int batch;
  3644. /*
  3645. * The per-cpu-pages pools are set to around 1000th of the
  3646. * size of the zone. But no more than 1/2 of a meg.
  3647. *
  3648. * OK, so we don't know how big the cache is. So guess.
  3649. */
  3650. batch = zone->managed_pages / 1024;
  3651. if (batch * PAGE_SIZE > 512 * 1024)
  3652. batch = (512 * 1024) / PAGE_SIZE;
  3653. batch /= 4; /* We effectively *= 4 below */
  3654. if (batch < 1)
  3655. batch = 1;
  3656. /*
  3657. * Clamp the batch to a 2^n - 1 value. Having a power
  3658. * of 2 value was found to be more likely to have
  3659. * suboptimal cache aliasing properties in some cases.
  3660. *
  3661. * For example if 2 tasks are alternately allocating
  3662. * batches of pages, one task can end up with a lot
  3663. * of pages of one half of the possible page colors
  3664. * and the other with pages of the other colors.
  3665. */
  3666. batch = rounddown_pow_of_two(batch + batch/2) - 1;
  3667. return batch;
  3668. #else
  3669. /* The deferral and batching of frees should be suppressed under NOMMU
  3670. * conditions.
  3671. *
  3672. * The problem is that NOMMU needs to be able to allocate large chunks
  3673. * of contiguous memory as there's no hardware page translation to
  3674. * assemble apparent contiguous memory from discontiguous pages.
  3675. *
  3676. * Queueing large contiguous runs of pages for batching, however,
  3677. * causes the pages to actually be freed in smaller chunks. As there
  3678. * can be a significant delay between the individual batches being
  3679. * recycled, this leads to the once large chunks of space being
  3680. * fragmented and becoming unavailable for high-order allocations.
  3681. */
  3682. return 0;
  3683. #endif
  3684. }
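/*
 * Worked example (illustrative, CONFIG_MMU with 4 KB pages): a zone with
 * 262,144 managed pages (1 GB) gives batch = 262144 / 1024 = 256, which the
 * half-megabyte cap lowers to 128, the "/= 4" step reduces to 32, and
 * rounddown_pow_of_two(32 + 16) - 1 finally clamps to 31.
 * pageset_set_batch() below then sets pcp->high to 6 * 31 = 186.
 */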
  3685. /*
  3686. * pcp->high and pcp->batch values are related and dependent on one another:
3687. * ->batch must never be higher than ->high.
  3688. * The following function updates them in a safe manner without read side
  3689. * locking.
  3690. *
  3691. * Any new users of pcp->batch and pcp->high should ensure they can cope with
3692. * those fields changing asynchronously (according to the above rule).
  3693. *
  3694. * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
  3695. * outside of boot time (or some other assurance that no concurrent updaters
  3696. * exist).
  3697. */
  3698. static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
  3699. unsigned long batch)
  3700. {
  3701. /* start with a fail safe value for batch */
  3702. pcp->batch = 1;
  3703. smp_wmb();
  3704. /* Update high, then batch, in order */
  3705. pcp->high = high;
  3706. smp_wmb();
  3707. pcp->batch = batch;
  3708. }
  3709. /* a companion to pageset_set_high() */
  3710. static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
  3711. {
  3712. pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
  3713. }
  3714. static void pageset_init(struct per_cpu_pageset *p)
  3715. {
  3716. struct per_cpu_pages *pcp;
  3717. int migratetype;
  3718. memset(p, 0, sizeof(*p));
  3719. pcp = &p->pcp;
  3720. pcp->count = 0;
  3721. for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
  3722. INIT_LIST_HEAD(&pcp->lists[migratetype]);
  3723. }
  3724. static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
  3725. {
  3726. pageset_init(p);
  3727. pageset_set_batch(p, batch);
  3728. }
  3729. /*
  3730. * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
  3731. * to the value high for the pageset p.
  3732. */
  3733. static void pageset_set_high(struct per_cpu_pageset *p,
  3734. unsigned long high)
  3735. {
  3736. unsigned long batch = max(1UL, high / 4);
  3737. if ((high / 4) > (PAGE_SHIFT * 8))
  3738. batch = PAGE_SHIFT * 8;
  3739. pageset_update(&p->pcp, high, batch);
  3740. }
  3741. static void pageset_set_high_and_batch(struct zone *zone,
  3742. struct per_cpu_pageset *pcp)
  3743. {
  3744. if (percpu_pagelist_fraction)
  3745. pageset_set_high(pcp,
  3746. (zone->managed_pages /
  3747. percpu_pagelist_fraction));
  3748. else
  3749. pageset_set_batch(pcp, zone_batchsize(zone));
  3750. }
  3751. static void __meminit zone_pageset_init(struct zone *zone, int cpu)
  3752. {
  3753. struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
  3754. pageset_init(pcp);
  3755. pageset_set_high_and_batch(zone, pcp);
  3756. }
  3757. static void __meminit setup_zone_pageset(struct zone *zone)
  3758. {
  3759. int cpu;
  3760. zone->pageset = alloc_percpu(struct per_cpu_pageset);
  3761. for_each_possible_cpu(cpu)
  3762. zone_pageset_init(zone, cpu);
  3763. }
  3764. /*
  3765. * Allocate per cpu pagesets and initialize them.
  3766. * Before this call only boot pagesets were available.
  3767. */
  3768. void __init setup_per_cpu_pageset(void)
  3769. {
  3770. struct zone *zone;
  3771. for_each_populated_zone(zone)
  3772. setup_zone_pageset(zone);
  3773. }
  3774. static noinline __init_refok
  3775. int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
  3776. {
  3777. int i;
  3778. size_t alloc_size;
  3779. /*
  3780. * The per-page waitqueue mechanism uses hashed waitqueues
  3781. * per zone.
  3782. */
  3783. zone->wait_table_hash_nr_entries =
  3784. wait_table_hash_nr_entries(zone_size_pages);
  3785. zone->wait_table_bits =
  3786. wait_table_bits(zone->wait_table_hash_nr_entries);
  3787. alloc_size = zone->wait_table_hash_nr_entries
  3788. * sizeof(wait_queue_head_t);
  3789. if (!slab_is_available()) {
  3790. zone->wait_table = (wait_queue_head_t *)
  3791. memblock_virt_alloc_node_nopanic(
  3792. alloc_size, zone->zone_pgdat->node_id);
  3793. } else {
  3794. /*
  3795. * This case means that a zone whose size was 0 gets new memory
  3796. * via memory hot-add.
  3797. * But it may be the case that a new node was hot-added. In
  3798. * this case vmalloc() will not be able to use this new node's
  3799. * memory - this wait_table must be initialized to use this new
  3800. * node itself as well.
  3801. * To use this new node's memory, further consideration will be
  3802. * necessary.
  3803. */
  3804. zone->wait_table = vmalloc(alloc_size);
  3805. }
  3806. if (!zone->wait_table)
  3807. return -ENOMEM;
  3808. for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
  3809. init_waitqueue_head(zone->wait_table + i);
  3810. return 0;
  3811. }
  3812. static __meminit void zone_pcp_init(struct zone *zone)
  3813. {
  3814. /*
  3815. * per cpu subsystem is not up at this point. The following code
  3816. * relies on the ability of the linker to provide the
  3817. * offset of a (static) per cpu variable into the per cpu area.
  3818. */
  3819. zone->pageset = &boot_pageset;
  3820. if (populated_zone(zone))
  3821. printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
  3822. zone->name, zone->present_pages,
  3823. zone_batchsize(zone));
  3824. }
  3825. int __meminit init_currently_empty_zone(struct zone *zone,
  3826. unsigned long zone_start_pfn,
  3827. unsigned long size,
  3828. enum memmap_context context)
  3829. {
  3830. struct pglist_data *pgdat = zone->zone_pgdat;
  3831. int ret;
  3832. ret = zone_wait_table_init(zone, size);
  3833. if (ret)
  3834. return ret;
  3835. pgdat->nr_zones = zone_idx(zone) + 1;
  3836. zone->zone_start_pfn = zone_start_pfn;
  3837. mminit_dprintk(MMINIT_TRACE, "memmap_init",
  3838. "Initialising map node %d zone %lu pfns %lu -> %lu\n",
  3839. pgdat->node_id,
  3840. (unsigned long)zone_idx(zone),
  3841. zone_start_pfn, (zone_start_pfn + size));
  3842. zone_init_free_lists(zone);
  3843. return 0;
  3844. }
  3845. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  3846. #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
  3847. /*
  3848. * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
  3849. */
  3850. int __meminit __early_pfn_to_nid(unsigned long pfn)
  3851. {
  3852. unsigned long start_pfn, end_pfn;
  3853. int nid;
  3854. /*
  3855. * NOTE: The following SMP-unsafe globals are only used early in boot
  3856. * when the kernel is running single-threaded.
  3857. */
  3858. static unsigned long __meminitdata last_start_pfn, last_end_pfn;
  3859. static int __meminitdata last_nid;
  3860. if (last_start_pfn <= pfn && pfn < last_end_pfn)
  3861. return last_nid;
  3862. nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
  3863. if (nid != -1) {
  3864. last_start_pfn = start_pfn;
  3865. last_end_pfn = end_pfn;
  3866. last_nid = nid;
  3867. }
  3868. return nid;
  3869. }
  3870. #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
  3871. int __meminit early_pfn_to_nid(unsigned long pfn)
  3872. {
  3873. int nid;
  3874. nid = __early_pfn_to_nid(pfn);
  3875. if (nid >= 0)
  3876. return nid;
  3877. /* just returns 0 */
  3878. return 0;
  3879. }
  3880. #ifdef CONFIG_NODES_SPAN_OTHER_NODES
  3881. bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
  3882. {
  3883. int nid;
  3884. nid = __early_pfn_to_nid(pfn);
  3885. if (nid >= 0 && nid != node)
  3886. return false;
  3887. return true;
  3888. }
  3889. #endif
  3890. /**
  3891. * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
  3892. * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
  3893. * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
  3894. *
  3895. * If an architecture guarantees that all ranges registered contain no holes
3896. * and may be freed, this function may be used instead of calling
  3897. * memblock_free_early_nid() manually.
  3898. */
  3899. void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
  3900. {
  3901. unsigned long start_pfn, end_pfn;
  3902. int i, this_nid;
  3903. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
  3904. start_pfn = min(start_pfn, max_low_pfn);
  3905. end_pfn = min(end_pfn, max_low_pfn);
  3906. if (start_pfn < end_pfn)
  3907. memblock_free_early_nid(PFN_PHYS(start_pfn),
  3908. (end_pfn - start_pfn) << PAGE_SHIFT,
  3909. this_nid);
  3910. }
  3911. }
  3912. /**
  3913. * sparse_memory_present_with_active_regions - Call memory_present for each active range
  3914. * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
  3915. *
  3916. * If an architecture guarantees that all ranges registered contain no holes and may
  3917. * be freed, this function may be used instead of calling memory_present() manually.
  3918. */
  3919. void __init sparse_memory_present_with_active_regions(int nid)
  3920. {
  3921. unsigned long start_pfn, end_pfn;
  3922. int i, this_nid;
  3923. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
  3924. memory_present(this_nid, start_pfn, end_pfn);
  3925. }
  3926. /**
  3927. * get_pfn_range_for_nid - Return the start and end page frames for a node
  3928. * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
  3929. * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
  3930. * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
  3931. *
  3932. * It returns the start and end page frame of a node based on information
  3933. * provided by memblock_set_node(). If called for a node
  3934. * with no available memory, a warning is printed and the start and end
  3935. * PFNs will be 0.
  3936. */
  3937. void __meminit get_pfn_range_for_nid(unsigned int nid,
  3938. unsigned long *start_pfn, unsigned long *end_pfn)
  3939. {
  3940. unsigned long this_start_pfn, this_end_pfn;
  3941. int i;
  3942. *start_pfn = -1UL;
  3943. *end_pfn = 0;
  3944. for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
  3945. *start_pfn = min(*start_pfn, this_start_pfn);
  3946. *end_pfn = max(*end_pfn, this_end_pfn);
  3947. }
  3948. if (*start_pfn == -1UL)
  3949. *start_pfn = 0;
  3950. }
  3951. /*
  3952. * This finds a zone that can be used for ZONE_MOVABLE pages. The
  3953. * assumption is made that zones within a node are ordered in monotonic
  3954. * increasing memory addresses so that the "highest" populated zone is used
  3955. */
  3956. static void __init find_usable_zone_for_movable(void)
  3957. {
  3958. int zone_index;
  3959. for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
  3960. if (zone_index == ZONE_MOVABLE)
  3961. continue;
  3962. if (arch_zone_highest_possible_pfn[zone_index] >
  3963. arch_zone_lowest_possible_pfn[zone_index])
  3964. break;
  3965. }
  3966. VM_BUG_ON(zone_index == -1);
  3967. movable_zone = zone_index;
  3968. }
  3969. /*
  3970. * The zone ranges provided by the architecture do not include ZONE_MOVABLE
3971. * because it is sized independently of the architecture. Unlike the other zones,
  3972. * the starting point for ZONE_MOVABLE is not fixed. It may be different
  3973. * in each node depending on the size of each node and how evenly kernelcore
  3974. * is distributed. This helper function adjusts the zone ranges
  3975. * provided by the architecture for a given node by using the end of the
  3976. * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
3977. * zones within a node are in order of monotonically increasing memory addresses
  3978. */
  3979. static void __meminit adjust_zone_range_for_zone_movable(int nid,
  3980. unsigned long zone_type,
  3981. unsigned long node_start_pfn,
  3982. unsigned long node_end_pfn,
  3983. unsigned long *zone_start_pfn,
  3984. unsigned long *zone_end_pfn)
  3985. {
  3986. /* Only adjust if ZONE_MOVABLE is on this node */
  3987. if (zone_movable_pfn[nid]) {
  3988. /* Size ZONE_MOVABLE */
  3989. if (zone_type == ZONE_MOVABLE) {
  3990. *zone_start_pfn = zone_movable_pfn[nid];
  3991. *zone_end_pfn = min(node_end_pfn,
  3992. arch_zone_highest_possible_pfn[movable_zone]);
  3993. /* Adjust for ZONE_MOVABLE starting within this range */
  3994. } else if (*zone_start_pfn < zone_movable_pfn[nid] &&
  3995. *zone_end_pfn > zone_movable_pfn[nid]) {
  3996. *zone_end_pfn = zone_movable_pfn[nid];
  3997. /* Check if this whole range is within ZONE_MOVABLE */
  3998. } else if (*zone_start_pfn >= zone_movable_pfn[nid])
  3999. *zone_start_pfn = *zone_end_pfn;
  4000. }
  4001. }
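/*
 * For example, if zone_movable_pfn[nid] falls in the middle of a node,
 * a kernel zone whose range crosses that PFN is truncated to end there,
 * a kernel zone lying entirely above it collapses to an empty range, and
 * ZONE_MOVABLE itself spans from that PFN to the end of the node (capped
 * at the highest usable zone).
 */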
  4002. /*
  4003. * Return the number of pages a zone spans in a node, including holes
  4004. * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  4005. */
  4006. static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  4007. unsigned long zone_type,
  4008. unsigned long node_start_pfn,
  4009. unsigned long node_end_pfn,
  4010. unsigned long *ignored)
  4011. {
  4012. unsigned long zone_start_pfn, zone_end_pfn;
  4013. /* Get the start and end of the zone */
  4014. zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
  4015. zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
  4016. adjust_zone_range_for_zone_movable(nid, zone_type,
  4017. node_start_pfn, node_end_pfn,
  4018. &zone_start_pfn, &zone_end_pfn);
  4019. /* Check that this node has pages within the zone's required range */
  4020. if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
  4021. return 0;
  4022. /* Move the zone boundaries inside the node if necessary */
  4023. zone_end_pfn = min(zone_end_pfn, node_end_pfn);
  4024. zone_start_pfn = max(zone_start_pfn, node_start_pfn);
  4025. /* Return the spanned pages */
  4026. return zone_end_pfn - zone_start_pfn;
  4027. }
  4028. /*
  4029. * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  4030. * then all holes in the requested range will be accounted for.
  4031. */
  4032. unsigned long __meminit __absent_pages_in_range(int nid,
  4033. unsigned long range_start_pfn,
  4034. unsigned long range_end_pfn)
  4035. {
  4036. unsigned long nr_absent = range_end_pfn - range_start_pfn;
  4037. unsigned long start_pfn, end_pfn;
  4038. int i;
  4039. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
  4040. start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
  4041. end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
  4042. nr_absent -= end_pfn - start_pfn;
  4043. }
  4044. return nr_absent;
  4045. }
  4046. /**
  4047. * absent_pages_in_range - Return number of page frames in holes within a range
  4048. * @start_pfn: The start PFN to start searching for holes
  4049. * @end_pfn: The end PFN to stop searching for holes
  4050. *
4051. * It returns the number of page frames in memory holes within a range.
  4052. */
  4053. unsigned long __init absent_pages_in_range(unsigned long start_pfn,
  4054. unsigned long end_pfn)
  4055. {
  4056. return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
  4057. }
  4058. /* Return the number of page frames in holes in a zone on a node */
  4059. static unsigned long __meminit zone_absent_pages_in_node(int nid,
  4060. unsigned long zone_type,
  4061. unsigned long node_start_pfn,
  4062. unsigned long node_end_pfn,
  4063. unsigned long *ignored)
  4064. {
  4065. unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
  4066. unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
  4067. unsigned long zone_start_pfn, zone_end_pfn;
  4068. zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
  4069. zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
  4070. adjust_zone_range_for_zone_movable(nid, zone_type,
  4071. node_start_pfn, node_end_pfn,
  4072. &zone_start_pfn, &zone_end_pfn);
  4073. return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
  4074. }
  4075. #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  4076. static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
  4077. unsigned long zone_type,
  4078. unsigned long node_start_pfn,
  4079. unsigned long node_end_pfn,
  4080. unsigned long *zones_size)
  4081. {
  4082. return zones_size[zone_type];
  4083. }
  4084. static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
  4085. unsigned long zone_type,
  4086. unsigned long node_start_pfn,
  4087. unsigned long node_end_pfn,
  4088. unsigned long *zholes_size)
  4089. {
  4090. if (!zholes_size)
  4091. return 0;
  4092. return zholes_size[zone_type];
  4093. }
  4094. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  4095. static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
  4096. unsigned long node_start_pfn,
  4097. unsigned long node_end_pfn,
  4098. unsigned long *zones_size,
  4099. unsigned long *zholes_size)
  4100. {
  4101. unsigned long realtotalpages, totalpages = 0;
  4102. enum zone_type i;
  4103. for (i = 0; i < MAX_NR_ZONES; i++)
  4104. totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
  4105. node_start_pfn,
  4106. node_end_pfn,
  4107. zones_size);
  4108. pgdat->node_spanned_pages = totalpages;
  4109. realtotalpages = totalpages;
  4110. for (i = 0; i < MAX_NR_ZONES; i++)
  4111. realtotalpages -=
  4112. zone_absent_pages_in_node(pgdat->node_id, i,
  4113. node_start_pfn, node_end_pfn,
  4114. zholes_size);
  4115. pgdat->node_present_pages = realtotalpages;
  4116. printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
  4117. realtotalpages);
  4118. }
  4119. #ifndef CONFIG_SPARSEMEM
  4120. /*
  4121. * Calculate the size of the zone->blockflags rounded to an unsigned long
  4122. * Start by making sure zonesize is a multiple of pageblock_order by rounding
  4123. * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
  4124. * round what is now in bits to nearest long in bits, then return it in
  4125. * bytes.
  4126. */
  4127. static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
  4128. {
  4129. unsigned long usemapsize;
  4130. zonesize += zone_start_pfn & (pageblock_nr_pages-1);
  4131. usemapsize = roundup(zonesize, pageblock_nr_pages);
  4132. usemapsize = usemapsize >> pageblock_order;
  4133. usemapsize *= NR_PAGEBLOCK_BITS;
  4134. usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
  4135. return usemapsize / 8;
  4136. }
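/*
 * Rough example, assuming pageblock_order == 9 and NR_PAGEBLOCK_BITS == 4:
 * a pageblock-aligned 1 GiB zone of 262144 4 KiB pages contains 512
 * pageblocks, needing 512 * 4 = 2048 bits, i.e. 256 bytes of usemap.
 */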
  4137. static void __init setup_usemap(struct pglist_data *pgdat,
  4138. struct zone *zone,
  4139. unsigned long zone_start_pfn,
  4140. unsigned long zonesize)
  4141. {
  4142. unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
  4143. zone->pageblock_flags = NULL;
  4144. if (usemapsize)
  4145. zone->pageblock_flags =
  4146. memblock_virt_alloc_node_nopanic(usemapsize,
  4147. pgdat->node_id);
  4148. }
  4149. #else
  4150. static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
  4151. unsigned long zone_start_pfn, unsigned long zonesize) {}
  4152. #endif /* CONFIG_SPARSEMEM */
  4153. #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
  4154. /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
  4155. void __paginginit set_pageblock_order(void)
  4156. {
  4157. unsigned int order;
  4158. /* Check that pageblock_nr_pages has not already been setup */
  4159. if (pageblock_order)
  4160. return;
  4161. if (HPAGE_SHIFT > PAGE_SHIFT)
  4162. order = HUGETLB_PAGE_ORDER;
  4163. else
  4164. order = MAX_ORDER - 1;
  4165. /*
  4166. * Assume the largest contiguous order of interest is a huge page.
  4167. * This value may be variable depending on boot parameters on IA64 and
  4168. * powerpc.
  4169. */
  4170. pageblock_order = order;
  4171. }
  4172. #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
  4173. /*
  4174. * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
  4175. * is unused as pageblock_order is set at compile-time. See
  4176. * include/linux/pageblock-flags.h for the values of pageblock_order based on
  4177. * the kernel config
  4178. */
  4179. void __paginginit set_pageblock_order(void)
  4180. {
  4181. }
  4182. #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
  4183. static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
  4184. unsigned long present_pages)
  4185. {
  4186. unsigned long pages = spanned_pages;
  4187. /*
  4188. * Provide a more accurate estimation if there are holes within
  4189. * the zone and SPARSEMEM is in use. If there are holes within the
  4190. * zone, each populated memory region may cost us one or two extra
  4191. * memmap pages due to alignment because memmap pages for each
4192. * populated region may not be naturally aligned on a page boundary.
  4193. * So the (present_pages >> 4) heuristic is a tradeoff for that.
  4194. */
  4195. if (spanned_pages > present_pages + (present_pages >> 4) &&
  4196. IS_ENABLED(CONFIG_SPARSEMEM))
  4197. pages = present_pages;
  4198. return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
  4199. }
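/*
 * Rough cost, assuming a 64-byte struct page and 4 KiB pages: the memmap
 * consumes one page per 64 pages of memory, i.e. about 1.6% of the zone.
 */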
  4200. /*
  4201. * Set up the zone data structures:
  4202. * - mark all pages reserved
  4203. * - mark all memory queues empty
  4204. * - clear the memory bitmaps
  4205. *
  4206. * NOTE: pgdat should get zeroed by caller.
  4207. */
  4208. static void __paginginit free_area_init_core(struct pglist_data *pgdat,
  4209. unsigned long node_start_pfn, unsigned long node_end_pfn,
  4210. unsigned long *zones_size, unsigned long *zholes_size)
  4211. {
  4212. enum zone_type j;
  4213. int nid = pgdat->node_id;
  4214. unsigned long zone_start_pfn = pgdat->node_start_pfn;
  4215. int ret;
  4216. pgdat_resize_init(pgdat);
  4217. #ifdef CONFIG_NUMA_BALANCING
  4218. spin_lock_init(&pgdat->numabalancing_migrate_lock);
  4219. pgdat->numabalancing_migrate_nr_pages = 0;
  4220. pgdat->numabalancing_migrate_next_window = jiffies;
  4221. #endif
  4222. init_waitqueue_head(&pgdat->kswapd_wait);
  4223. init_waitqueue_head(&pgdat->pfmemalloc_wait);
  4224. for (j = 0; j < MAX_NR_ZONES; j++) {
  4225. struct zone *zone = pgdat->node_zones + j;
  4226. unsigned long size, realsize, freesize, memmap_pages;
  4227. size = zone_spanned_pages_in_node(nid, j, node_start_pfn,
  4228. node_end_pfn, zones_size);
  4229. realsize = freesize = size - zone_absent_pages_in_node(nid, j,
  4230. node_start_pfn,
  4231. node_end_pfn,
  4232. zholes_size);
  4233. /*
  4234. * Adjust freesize so that it accounts for how much memory
  4235. * is used by this zone for memmap. This affects the watermark
  4236. * and per-cpu initialisations
  4237. */
  4238. memmap_pages = calc_memmap_size(size, realsize);
  4239. if (freesize >= memmap_pages) {
  4240. freesize -= memmap_pages;
  4241. if (memmap_pages)
  4242. printk(KERN_DEBUG
  4243. " %s zone: %lu pages used for memmap\n",
  4244. zone_names[j], memmap_pages);
  4245. } else
  4246. printk(KERN_WARNING
  4247. " %s zone: %lu pages exceeds freesize %lu\n",
  4248. zone_names[j], memmap_pages, freesize);
  4249. /* Account for reserved pages */
  4250. if (j == 0 && freesize > dma_reserve) {
  4251. freesize -= dma_reserve;
  4252. printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
  4253. zone_names[0], dma_reserve);
  4254. }
  4255. if (!is_highmem_idx(j))
  4256. nr_kernel_pages += freesize;
  4257. /* Charge for highmem memmap if there are enough kernel pages */
  4258. else if (nr_kernel_pages > memmap_pages * 2)
  4259. nr_kernel_pages -= memmap_pages;
  4260. nr_all_pages += freesize;
  4261. zone->spanned_pages = size;
  4262. zone->present_pages = realsize;
  4263. /*
  4264. * Set an approximate value for lowmem here, it will be adjusted
  4265. * when the bootmem allocator frees pages into the buddy system.
  4266. * And all highmem pages will be managed by the buddy system.
  4267. */
  4268. zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
  4269. #ifdef CONFIG_NUMA
  4270. zone->node = nid;
  4271. zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
  4272. / 100;
  4273. zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
  4274. #endif
  4275. zone->name = zone_names[j];
  4276. spin_lock_init(&zone->lock);
  4277. spin_lock_init(&zone->lru_lock);
  4278. zone_seqlock_init(zone);
  4279. zone->zone_pgdat = pgdat;
  4280. zone_pcp_init(zone);
  4281. /* For bootup, initialized properly in watermark setup */
  4282. mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
  4283. lruvec_init(&zone->lruvec);
  4284. if (!size)
  4285. continue;
  4286. set_pageblock_order();
  4287. setup_usemap(pgdat, zone, zone_start_pfn, size);
  4288. ret = init_currently_empty_zone(zone, zone_start_pfn,
  4289. size, MEMMAP_EARLY);
  4290. BUG_ON(ret);
  4291. memmap_init(size, nid, j, zone_start_pfn);
  4292. zone_start_pfn += size;
  4293. }
  4294. }
  4295. static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
  4296. {
  4297. /* Skip empty nodes */
  4298. if (!pgdat->node_spanned_pages)
  4299. return;
  4300. #ifdef CONFIG_FLAT_NODE_MEM_MAP
  4301. /* ia64 gets its own node_mem_map, before this, without bootmem */
  4302. if (!pgdat->node_mem_map) {
  4303. unsigned long size, start, end;
  4304. struct page *map;
  4305. /*
  4306. * The zone's endpoints aren't required to be MAX_ORDER
  4307. * aligned but the node_mem_map endpoints must be in order
  4308. * for the buddy allocator to function correctly.
  4309. */
  4310. start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
  4311. end = pgdat_end_pfn(pgdat);
  4312. end = ALIGN(end, MAX_ORDER_NR_PAGES);
  4313. size = (end - start) * sizeof(struct page);
  4314. map = alloc_remap(pgdat->node_id, size);
  4315. if (!map)
  4316. map = memblock_virt_alloc_node_nopanic(size,
  4317. pgdat->node_id);
  4318. pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
  4319. }
  4320. #ifndef CONFIG_NEED_MULTIPLE_NODES
  4321. /*
  4322. * With no DISCONTIG, the global mem_map is just set as node 0's
  4323. */
  4324. if (pgdat == NODE_DATA(0)) {
  4325. mem_map = NODE_DATA(0)->node_mem_map;
  4326. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4327. if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
  4328. mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
  4329. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  4330. }
  4331. #endif
  4332. #endif /* CONFIG_FLAT_NODE_MEM_MAP */
  4333. }
  4334. void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
  4335. unsigned long node_start_pfn, unsigned long *zholes_size)
  4336. {
  4337. pg_data_t *pgdat = NODE_DATA(nid);
  4338. unsigned long start_pfn = 0;
  4339. unsigned long end_pfn = 0;
  4340. /* pg_data_t should be reset to zero when it's allocated */
  4341. WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
  4342. pgdat->node_id = nid;
  4343. pgdat->node_start_pfn = node_start_pfn;
  4344. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4345. get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
  4346. printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
  4347. (u64) start_pfn << PAGE_SHIFT, (u64) (end_pfn << PAGE_SHIFT) - 1);
  4348. #endif
  4349. calculate_node_totalpages(pgdat, start_pfn, end_pfn,
  4350. zones_size, zholes_size);
  4351. alloc_node_mem_map(pgdat);
  4352. #ifdef CONFIG_FLAT_NODE_MEM_MAP
  4353. printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
  4354. nid, (unsigned long)pgdat,
  4355. (unsigned long)pgdat->node_mem_map);
  4356. #endif
  4357. free_area_init_core(pgdat, start_pfn, end_pfn,
  4358. zones_size, zholes_size);
  4359. }
  4360. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4361. #if MAX_NUMNODES > 1
  4362. /*
  4363. * Figure out the number of possible node ids.
  4364. */
  4365. void __init setup_nr_node_ids(void)
  4366. {
  4367. unsigned int node;
  4368. unsigned int highest = 0;
  4369. for_each_node_mask(node, node_possible_map)
  4370. highest = node;
  4371. nr_node_ids = highest + 1;
  4372. }
  4373. #endif
  4374. /**
  4375. * node_map_pfn_alignment - determine the maximum internode alignment
  4376. *
  4377. * This function should be called after node map is populated and sorted.
  4378. * It calculates the maximum power of two alignment which can distinguish
  4379. * all the nodes.
  4380. *
  4381. * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
  4382. * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
4383. * nodes are shifted by 256MiB, 256MiB will be returned. Note that if only the last node is
  4384. * shifted, 1GiB is enough and this function will indicate so.
  4385. *
  4386. * This is used to test whether pfn -> nid mapping of the chosen memory
  4387. * model has fine enough granularity to avoid incorrect mapping for the
  4388. * populated node map.
  4389. *
  4390. * Returns the determined alignment in pfn's. 0 if there is no alignment
  4391. * requirement (single node).
  4392. */
  4393. unsigned long __init node_map_pfn_alignment(void)
  4394. {
  4395. unsigned long accl_mask = 0, last_end = 0;
  4396. unsigned long start, end, mask;
  4397. int last_nid = -1;
  4398. int i, nid;
  4399. for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
  4400. if (!start || last_nid < 0 || last_nid == nid) {
  4401. last_nid = nid;
  4402. last_end = end;
  4403. continue;
  4404. }
  4405. /*
  4406. * Start with a mask granular enough to pin-point to the
  4407. * start pfn and tick off bits one-by-one until it becomes
  4408. * too coarse to separate the current node from the last.
  4409. */
  4410. mask = ~((1 << __ffs(start)) - 1);
  4411. while (mask && last_end <= (start & (mask << 1)))
  4412. mask <<= 1;
  4413. /* accumulate all internode masks */
  4414. accl_mask |= mask;
  4415. }
  4416. /* convert mask to number of pages */
  4417. return ~accl_mask + 1;
  4418. }
  4419. /* Find the lowest pfn for a node */
  4420. static unsigned long __init find_min_pfn_for_node(int nid)
  4421. {
  4422. unsigned long min_pfn = ULONG_MAX;
  4423. unsigned long start_pfn;
  4424. int i;
  4425. for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
  4426. min_pfn = min(min_pfn, start_pfn);
  4427. if (min_pfn == ULONG_MAX) {
  4428. printk(KERN_WARNING
  4429. "Could not find start_pfn for node %d\n", nid);
  4430. return 0;
  4431. }
  4432. return min_pfn;
  4433. }
  4434. /**
  4435. * find_min_pfn_with_active_regions - Find the minimum PFN registered
  4436. *
  4437. * It returns the minimum PFN based on information provided via
  4438. * memblock_set_node().
  4439. */
  4440. unsigned long __init find_min_pfn_with_active_regions(void)
  4441. {
  4442. return find_min_pfn_for_node(MAX_NUMNODES);
  4443. }
  4444. /*
  4445. * early_calculate_totalpages()
  4446. * Sum pages in active regions for movable zone.
  4447. * Populate N_MEMORY for calculating usable_nodes.
  4448. */
  4449. static unsigned long __init early_calculate_totalpages(void)
  4450. {
  4451. unsigned long totalpages = 0;
  4452. unsigned long start_pfn, end_pfn;
  4453. int i, nid;
  4454. for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
  4455. unsigned long pages = end_pfn - start_pfn;
  4456. totalpages += pages;
  4457. if (pages)
  4458. node_set_state(nid, N_MEMORY);
  4459. }
  4460. return totalpages;
  4461. }
  4462. /*
  4463. * Find the PFN the Movable zone begins in each node. Kernel memory
  4464. * is spread evenly between nodes as long as the nodes have enough
  4465. * memory. When they don't, some nodes will have more kernelcore than
  4466. * others
  4467. */
  4468. static void __init find_zone_movable_pfns_for_nodes(void)
  4469. {
  4470. int i, nid;
  4471. unsigned long usable_startpfn;
  4472. unsigned long kernelcore_node, kernelcore_remaining;
4473. /* save the state before borrowing the nodemask */
  4474. nodemask_t saved_node_state = node_states[N_MEMORY];
  4475. unsigned long totalpages = early_calculate_totalpages();
  4476. int usable_nodes = nodes_weight(node_states[N_MEMORY]);
  4477. struct memblock_region *r;
  4478. /* Need to find movable_zone earlier when movable_node is specified. */
  4479. find_usable_zone_for_movable();
  4480. /*
  4481. * If movable_node is specified, ignore kernelcore and movablecore
  4482. * options.
  4483. */
  4484. if (movable_node_is_enabled()) {
  4485. for_each_memblock(memory, r) {
  4486. if (!memblock_is_hotpluggable(r))
  4487. continue;
  4488. nid = r->nid;
  4489. usable_startpfn = PFN_DOWN(r->base);
  4490. zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
  4491. min(usable_startpfn, zone_movable_pfn[nid]) :
  4492. usable_startpfn;
  4493. }
  4494. goto out2;
  4495. }
  4496. /*
  4497. * If movablecore=nn[KMG] was specified, calculate what size of
  4498. * kernelcore that corresponds so that memory usable for
  4499. * any allocation type is evenly spread. If both kernelcore
  4500. * and movablecore are specified, then the value of kernelcore
  4501. * will be used for required_kernelcore if it's greater than
  4502. * what movablecore would have allowed.
  4503. */
  4504. if (required_movablecore) {
  4505. unsigned long corepages;
  4506. /*
  4507. * Round-up so that ZONE_MOVABLE is at least as large as what
  4508. * was requested by the user
  4509. */
  4510. required_movablecore =
  4511. roundup(required_movablecore, MAX_ORDER_NR_PAGES);
  4512. corepages = totalpages - required_movablecore;
  4513. required_kernelcore = max(required_kernelcore, corepages);
  4514. }
  4515. /* If kernelcore was not specified, there is no ZONE_MOVABLE */
  4516. if (!required_kernelcore)
  4517. goto out;
  4518. /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
  4519. usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
  4520. restart:
  4521. /* Spread kernelcore memory as evenly as possible throughout nodes */
  4522. kernelcore_node = required_kernelcore / usable_nodes;
  4523. for_each_node_state(nid, N_MEMORY) {
  4524. unsigned long start_pfn, end_pfn;
  4525. /*
  4526. * Recalculate kernelcore_node if the division per node
  4527. * now exceeds what is necessary to satisfy the requested
  4528. * amount of memory for the kernel
  4529. */
  4530. if (required_kernelcore < kernelcore_node)
  4531. kernelcore_node = required_kernelcore / usable_nodes;
  4532. /*
  4533. * As the map is walked, we track how much memory is usable
  4534. * by the kernel using kernelcore_remaining. When it is
  4535. * 0, the rest of the node is usable by ZONE_MOVABLE
  4536. */
  4537. kernelcore_remaining = kernelcore_node;
  4538. /* Go through each range of PFNs within this node */
  4539. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
  4540. unsigned long size_pages;
  4541. start_pfn = max(start_pfn, zone_movable_pfn[nid]);
  4542. if (start_pfn >= end_pfn)
  4543. continue;
  4544. /* Account for what is only usable for kernelcore */
  4545. if (start_pfn < usable_startpfn) {
  4546. unsigned long kernel_pages;
  4547. kernel_pages = min(end_pfn, usable_startpfn)
  4548. - start_pfn;
  4549. kernelcore_remaining -= min(kernel_pages,
  4550. kernelcore_remaining);
  4551. required_kernelcore -= min(kernel_pages,
  4552. required_kernelcore);
  4553. /* Continue if range is now fully accounted */
  4554. if (end_pfn <= usable_startpfn) {
  4555. /*
  4556. * Push zone_movable_pfn to the end so
  4557. * that if we have to rebalance
  4558. * kernelcore across nodes, we will
  4559. * not double account here
  4560. */
  4561. zone_movable_pfn[nid] = end_pfn;
  4562. continue;
  4563. }
  4564. start_pfn = usable_startpfn;
  4565. }
  4566. /*
  4567. * The usable PFN range for ZONE_MOVABLE is from
  4568. * start_pfn->end_pfn. Calculate size_pages as the
  4569. * number of pages used as kernelcore
  4570. */
  4571. size_pages = end_pfn - start_pfn;
  4572. if (size_pages > kernelcore_remaining)
  4573. size_pages = kernelcore_remaining;
  4574. zone_movable_pfn[nid] = start_pfn + size_pages;
  4575. /*
  4576. * Some kernelcore has been met, update counts and
  4577. * break if the kernelcore for this node has been
  4578. * satisfied
  4579. */
  4580. required_kernelcore -= min(required_kernelcore,
  4581. size_pages);
  4582. kernelcore_remaining -= size_pages;
  4583. if (!kernelcore_remaining)
  4584. break;
  4585. }
  4586. }
  4587. /*
  4588. * If there is still required_kernelcore, we do another pass with one
  4589. * less node in the count. This will push zone_movable_pfn[nid] further
  4590. * along on the nodes that still have memory until kernelcore is
  4591. * satisfied
  4592. */
  4593. usable_nodes--;
  4594. if (usable_nodes && required_kernelcore > usable_nodes)
  4595. goto restart;
  4596. out2:
  4597. /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
  4598. for (nid = 0; nid < MAX_NUMNODES; nid++)
  4599. zone_movable_pfn[nid] =
  4600. roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
  4601. out:
  4602. /* restore the node_state */
  4603. node_states[N_MEMORY] = saved_node_state;
  4604. }
  4605. /* Any regular or high memory on that node ? */
  4606. static void check_for_memory(pg_data_t *pgdat, int nid)
  4607. {
  4608. enum zone_type zone_type;
  4609. if (N_MEMORY == N_NORMAL_MEMORY)
  4610. return;
  4611. for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
  4612. struct zone *zone = &pgdat->node_zones[zone_type];
  4613. if (populated_zone(zone)) {
  4614. node_set_state(nid, N_HIGH_MEMORY);
  4615. if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
  4616. zone_type <= ZONE_NORMAL)
  4617. node_set_state(nid, N_NORMAL_MEMORY);
  4618. break;
  4619. }
  4620. }
  4621. }
  4622. /**
  4623. * free_area_init_nodes - Initialise all pg_data_t and zone data
  4624. * @max_zone_pfn: an array of max PFNs for each zone
  4625. *
  4626. * This will call free_area_init_node() for each active node in the system.
  4627. * Using the page ranges provided by memblock_set_node(), the size of each
  4628. * zone in each node and their holes is calculated. If the maximum PFN
  4629. * between two adjacent zones match, it is assumed that the zone is empty.
  4630. * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
  4631. * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
  4632. * starts where the previous one ended. For example, ZONE_DMA32 starts
  4633. * at arch_max_dma_pfn.
  4634. */
  4635. void __init free_area_init_nodes(unsigned long *max_zone_pfn)
  4636. {
  4637. unsigned long start_pfn, end_pfn;
  4638. int i, nid;
  4639. /* Record where the zone boundaries are */
  4640. memset(arch_zone_lowest_possible_pfn, 0,
  4641. sizeof(arch_zone_lowest_possible_pfn));
  4642. memset(arch_zone_highest_possible_pfn, 0,
  4643. sizeof(arch_zone_highest_possible_pfn));
  4644. arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
  4645. arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
  4646. for (i = 1; i < MAX_NR_ZONES; i++) {
  4647. if (i == ZONE_MOVABLE)
  4648. continue;
  4649. arch_zone_lowest_possible_pfn[i] =
  4650. arch_zone_highest_possible_pfn[i-1];
  4651. arch_zone_highest_possible_pfn[i] =
  4652. max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
  4653. }
  4654. arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
  4655. arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
  4656. /* Find the PFNs that ZONE_MOVABLE begins at in each node */
  4657. memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
  4658. find_zone_movable_pfns_for_nodes();
  4659. /* Print out the zone ranges */
  4660. pr_info("Zone ranges:\n");
  4661. for (i = 0; i < MAX_NR_ZONES; i++) {
  4662. if (i == ZONE_MOVABLE)
  4663. continue;
  4664. pr_info(" %-8s ", zone_names[i]);
  4665. if (arch_zone_lowest_possible_pfn[i] ==
  4666. arch_zone_highest_possible_pfn[i])
  4667. pr_cont("empty\n");
  4668. else
  4669. pr_cont("[mem %0#10lx-%0#10lx]\n",
  4670. arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
  4671. (arch_zone_highest_possible_pfn[i]
  4672. << PAGE_SHIFT) - 1);
  4673. }
  4674. /* Print out the PFNs ZONE_MOVABLE begins at in each node */
  4675. pr_info("Movable zone start for each node\n");
  4676. for (i = 0; i < MAX_NUMNODES; i++) {
  4677. if (zone_movable_pfn[i])
  4678. pr_info(" Node %d: %#010lx\n", i,
  4679. zone_movable_pfn[i] << PAGE_SHIFT);
  4680. }
  4681. /* Print out the early node map */
  4682. pr_info("Early memory node ranges\n");
  4683. for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
  4684. pr_info(" node %3d: [mem %#010lx-%#010lx]\n", nid,
  4685. start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
  4686. /* Initialise every node */
  4687. mminit_verify_pageflags_layout();
  4688. setup_nr_node_ids();
  4689. for_each_online_node(nid) {
  4690. pg_data_t *pgdat = NODE_DATA(nid);
  4691. free_area_init_node(nid, NULL,
  4692. find_min_pfn_for_node(nid), NULL);
  4693. /* Any memory on that node */
  4694. if (pgdat->node_present_pages)
  4695. node_set_state(nid, N_MEMORY);
  4696. check_for_memory(pgdat, nid);
  4697. }
  4698. }
  4699. static int __init cmdline_parse_core(char *p, unsigned long *core)
  4700. {
  4701. unsigned long long coremem;
  4702. if (!p)
  4703. return -EINVAL;
  4704. coremem = memparse(p, &p);
  4705. *core = coremem >> PAGE_SHIFT;
  4706. /* Paranoid check that UL is enough for the coremem value */
  4707. WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
  4708. return 0;
  4709. }
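/*
 * Example, assuming 4 KiB pages: "kernelcore=512M" parses to 536870912
 * bytes and stores 131072 pages in required_kernelcore.
 */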
  4710. /*
  4711. * kernelcore=size sets the amount of memory for use for allocations that
  4712. * cannot be reclaimed or migrated.
  4713. */
  4714. static int __init cmdline_parse_kernelcore(char *p)
  4715. {
  4716. return cmdline_parse_core(p, &required_kernelcore);
  4717. }
  4718. /*
  4719. * movablecore=size sets the amount of memory for use for allocations that
  4720. * can be reclaimed or migrated.
  4721. */
  4722. static int __init cmdline_parse_movablecore(char *p)
  4723. {
  4724. return cmdline_parse_core(p, &required_movablecore);
  4725. }
  4726. early_param("kernelcore", cmdline_parse_kernelcore);
  4727. early_param("movablecore", cmdline_parse_movablecore);
  4728. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  4729. void adjust_managed_page_count(struct page *page, long count)
  4730. {
  4731. spin_lock(&managed_page_count_lock);
  4732. page_zone(page)->managed_pages += count;
  4733. totalram_pages += count;
  4734. #ifdef CONFIG_HIGHMEM
  4735. if (PageHighMem(page))
  4736. totalhigh_pages += count;
  4737. #endif
  4738. spin_unlock(&managed_page_count_lock);
  4739. }
  4740. EXPORT_SYMBOL(adjust_managed_page_count);
  4741. unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
  4742. {
  4743. void *pos;
  4744. unsigned long pages = 0;
  4745. start = (void *)PAGE_ALIGN((unsigned long)start);
  4746. end = (void *)((unsigned long)end & PAGE_MASK);
  4747. for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
  4748. if ((unsigned int)poison <= 0xFF)
  4749. memset(pos, poison, PAGE_SIZE);
  4750. free_reserved_page(virt_to_page(pos));
  4751. }
  4752. if (pages && s)
  4753. pr_info("Freeing %s memory: %ldK (%p - %p)\n",
  4754. s, pages << (PAGE_SHIFT - 10), start, end);
  4755. return pages;
  4756. }
  4757. EXPORT_SYMBOL(free_reserved_area);
  4758. #ifdef CONFIG_HIGHMEM
  4759. void free_highmem_page(struct page *page)
  4760. {
  4761. __free_reserved_page(page);
  4762. totalram_pages++;
  4763. page_zone(page)->managed_pages++;
  4764. totalhigh_pages++;
  4765. }
  4766. #endif
  4767. void __init mem_init_print_info(const char *str)
  4768. {
  4769. unsigned long physpages, codesize, datasize, rosize, bss_size;
  4770. unsigned long init_code_size, init_data_size;
  4771. physpages = get_num_physpages();
  4772. codesize = _etext - _stext;
  4773. datasize = _edata - _sdata;
  4774. rosize = __end_rodata - __start_rodata;
  4775. bss_size = __bss_stop - __bss_start;
  4776. init_data_size = __init_end - __init_begin;
  4777. init_code_size = _einittext - _sinittext;
  4778. /*
  4779. * Detect special cases and adjust section sizes accordingly:
  4780. * 1) .init.* may be embedded into .data sections
  4781. * 2) .init.text.* may be out of [__init_begin, __init_end],
  4782. * please refer to arch/tile/kernel/vmlinux.lds.S.
  4783. * 3) .rodata.* may be embedded into .text or .data sections.
  4784. */
  4785. #define adj_init_size(start, end, size, pos, adj) \
  4786. do { \
  4787. if (start <= pos && pos < end && size > adj) \
  4788. size -= adj; \
  4789. } while (0)
  4790. adj_init_size(__init_begin, __init_end, init_data_size,
  4791. _sinittext, init_code_size);
  4792. adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
  4793. adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
  4794. adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
  4795. adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
  4796. #undef adj_init_size
  4797. pr_info("Memory: %luK/%luK available "
  4798. "(%luK kernel code, %luK rwdata, %luK rodata, "
  4799. "%luK init, %luK bss, %luK reserved"
  4800. #ifdef CONFIG_HIGHMEM
  4801. ", %luK highmem"
  4802. #endif
  4803. "%s%s)\n",
  4804. nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
  4805. codesize >> 10, datasize >> 10, rosize >> 10,
  4806. (init_data_size + init_code_size) >> 10, bss_size >> 10,
  4807. (physpages - totalram_pages) << (PAGE_SHIFT-10),
  4808. #ifdef CONFIG_HIGHMEM
  4809. totalhigh_pages << (PAGE_SHIFT-10),
  4810. #endif
  4811. str ? ", " : "", str ? str : "");
  4812. }
  4813. /**
  4814. * set_dma_reserve - set the specified number of pages reserved in the first zone
  4815. * @new_dma_reserve: The number of pages to mark reserved
  4816. *
  4817. * The per-cpu batchsize and zone watermarks are determined by present_pages.
  4818. * In the DMA zone, a significant percentage may be consumed by kernel image
  4819. * and other unfreeable allocations which can skew the watermarks badly. This
  4820. * function may optionally be used to account for unfreeable pages in the
  4821. * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
  4822. * smaller per-cpu batchsize.
  4823. */
  4824. void __init set_dma_reserve(unsigned long new_dma_reserve)
  4825. {
  4826. dma_reserve = new_dma_reserve;
  4827. }
  4828. void __init free_area_init(unsigned long *zones_size)
  4829. {
  4830. free_area_init_node(0, zones_size,
  4831. __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
  4832. }
  4833. static int page_alloc_cpu_notify(struct notifier_block *self,
  4834. unsigned long action, void *hcpu)
  4835. {
  4836. int cpu = (unsigned long)hcpu;
  4837. if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
  4838. lru_add_drain_cpu(cpu);
  4839. drain_pages(cpu);
  4840. /*
  4841. * Spill the event counters of the dead processor
  4842. * into the current processors event counters.
  4843. * This artificially elevates the count of the current
  4844. * processor.
  4845. */
  4846. vm_events_fold_cpu(cpu);
  4847. /*
  4848. * Zero the differential counters of the dead processor
  4849. * so that the vm statistics are consistent.
  4850. *
  4851. * This is only okay since the processor is dead and cannot
  4852. * race with what we are doing.
  4853. */
  4854. cpu_vm_stats_fold(cpu);
  4855. }
  4856. return NOTIFY_OK;
  4857. }
  4858. void __init page_alloc_init(void)
  4859. {
  4860. hotcpu_notifier(page_alloc_cpu_notify, 0);
  4861. }
  4862. /*
  4863. * calculate_totalreserve_pages - called when sysctl_lower_zone_reserve_ratio
  4864. * or min_free_kbytes changes.
  4865. */
  4866. static void calculate_totalreserve_pages(void)
  4867. {
  4868. struct pglist_data *pgdat;
  4869. unsigned long reserve_pages = 0;
  4870. enum zone_type i, j;
  4871. for_each_online_pgdat(pgdat) {
  4872. for (i = 0; i < MAX_NR_ZONES; i++) {
  4873. struct zone *zone = pgdat->node_zones + i;
  4874. long max = 0;
  4875. /* Find valid and maximum lowmem_reserve in the zone */
  4876. for (j = i; j < MAX_NR_ZONES; j++) {
  4877. if (zone->lowmem_reserve[j] > max)
  4878. max = zone->lowmem_reserve[j];
  4879. }
  4880. /* we treat the high watermark as reserved pages. */
  4881. max += high_wmark_pages(zone);
  4882. if (max > zone->managed_pages)
  4883. max = zone->managed_pages;
  4884. reserve_pages += max;
  4885. /*
  4886. * Lowmem reserves are not available to
  4887. * GFP_HIGHUSER page cache allocations and
  4888. * kswapd tries to balance zones to their high
  4889. * watermark. As a result, neither should be
  4890. * regarded as dirtyable memory, to prevent a
  4891. * situation where reclaim has to clean pages
  4892. * in order to balance the zones.
  4893. */
  4894. zone->dirty_balance_reserve = max;
  4895. }
  4896. }
  4897. dirty_balance_reserve = reserve_pages;
  4898. totalreserve_pages = reserve_pages;
  4899. }
  4900. /*
  4901. * setup_per_zone_lowmem_reserve - called whenever
  4902. * sysctl_lower_zone_reserve_ratio changes. Ensures that each zone
  4903. * has a correct pages reserved value, so an adequate number of
  4904. * pages are left in the zone after a successful __alloc_pages().
  4905. */
  4906. static void setup_per_zone_lowmem_reserve(void)
  4907. {
  4908. struct pglist_data *pgdat;
  4909. enum zone_type j, idx;
  4910. for_each_online_pgdat(pgdat) {
  4911. for (j = 0; j < MAX_NR_ZONES; j++) {
  4912. struct zone *zone = pgdat->node_zones + j;
  4913. unsigned long managed_pages = zone->managed_pages;
  4914. zone->lowmem_reserve[j] = 0;
  4915. idx = j;
  4916. while (idx) {
  4917. struct zone *lower_zone;
  4918. idx--;
  4919. if (sysctl_lowmem_reserve_ratio[idx] < 1)
  4920. sysctl_lowmem_reserve_ratio[idx] = 1;
  4921. lower_zone = pgdat->node_zones + idx;
  4922. lower_zone->lowmem_reserve[j] = managed_pages /
  4923. sysctl_lowmem_reserve_ratio[idx];
  4924. managed_pages += lower_zone->managed_pages;
  4925. }
  4926. }
  4927. }
  4928. /* update totalreserve_pages */
  4929. calculate_totalreserve_pages();
  4930. }
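/*
 * Example, assuming the default lowmem_reserve_ratio of 256 for DMA32:
 * with a 4 GiB ZONE_NORMAL (1048576 managed pages) above it, ZONE_DMA32
 * reserves 1048576 / 256 = 4096 of its own pages against allocations
 * that could have been satisfied from ZONE_NORMAL.
 */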
  4931. static void __setup_per_zone_wmarks(void)
  4932. {
  4933. unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
  4934. unsigned long lowmem_pages = 0;
  4935. struct zone *zone;
  4936. unsigned long flags;
  4937. /* Calculate total number of !ZONE_HIGHMEM pages */
  4938. for_each_zone(zone) {
  4939. if (!is_highmem(zone))
  4940. lowmem_pages += zone->managed_pages;
  4941. }
  4942. for_each_zone(zone) {
  4943. u64 tmp;
  4944. spin_lock_irqsave(&zone->lock, flags);
  4945. tmp = (u64)pages_min * zone->managed_pages;
  4946. do_div(tmp, lowmem_pages);
  4947. if (is_highmem(zone)) {
  4948. /*
  4949. * __GFP_HIGH and PF_MEMALLOC allocations usually don't
  4950. * need highmem pages, so cap pages_min to a small
  4951. * value here.
  4952. *
  4953. * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
4954. * deltas control async page reclaim, and so should
  4955. * not be capped for highmem.
  4956. */
  4957. unsigned long min_pages;
  4958. min_pages = zone->managed_pages / 1024;
  4959. min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
  4960. zone->watermark[WMARK_MIN] = min_pages;
  4961. } else {
  4962. /*
  4963. * If it's a lowmem zone, reserve a number of pages
  4964. * proportionate to the zone's size.
  4965. */
  4966. zone->watermark[WMARK_MIN] = tmp;
  4967. }
  4968. zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
  4969. zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
  4970. __mod_zone_page_state(zone, NR_ALLOC_BATCH,
  4971. high_wmark_pages(zone) - low_wmark_pages(zone) -
  4972. atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
  4973. setup_zone_migrate_reserve(zone);
  4974. spin_unlock_irqrestore(&zone->lock, flags);
  4975. }
  4976. /* update totalreserve_pages */
  4977. calculate_totalreserve_pages();
  4978. }
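/*
 * Worked example, assuming 4 KiB pages: min_free_kbytes = 4096 gives
 * pages_min = 1024. A lowmem zone holding half of all lowmem gets
 * tmp = 512, so WMARK_MIN = 512, WMARK_LOW = 512 + 512/4 = 640 and
 * WMARK_HIGH = 512 + 512/2 = 768 pages.
 */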
  4979. /**
  4980. * setup_per_zone_wmarks - called when min_free_kbytes changes
  4981. * or when memory is hot-{added|removed}
  4982. *
  4983. * Ensures that the watermark[min,low,high] values for each zone are set
  4984. * correctly with respect to min_free_kbytes.
  4985. */
  4986. void setup_per_zone_wmarks(void)
  4987. {
  4988. mutex_lock(&zonelists_mutex);
  4989. __setup_per_zone_wmarks();
  4990. mutex_unlock(&zonelists_mutex);
  4991. }
  4992. /*
  4993. * The inactive anon list should be small enough that the VM never has to
  4994. * do too much work, but large enough that each inactive page has a chance
  4995. * to be referenced again before it is swapped out.
  4996. *
  4997. * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
  4998. * INACTIVE_ANON pages on this zone's LRU, maintained by the
  4999. * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
  5000. * the anonymous pages are kept on the inactive list.
  5001. *
  5002. * total target max
  5003. * memory ratio inactive anon
  5004. * -------------------------------------
  5005. * 10MB 1 5MB
  5006. * 100MB 1 50MB
  5007. * 1GB 3 250MB
  5008. * 10GB 10 0.9GB
  5009. * 100GB 31 3GB
  5010. * 1TB 101 10GB
  5011. * 10TB 320 32GB
  5012. */
  5013. static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
  5014. {
  5015. unsigned int gb, ratio;
  5016. /* Zone size in gigabytes */
  5017. gb = zone->managed_pages >> (30 - PAGE_SHIFT);
  5018. if (gb)
  5019. ratio = int_sqrt(10 * gb);
  5020. else
  5021. ratio = 1;
  5022. zone->inactive_ratio = ratio;
  5023. }
  5024. static void __meminit setup_per_zone_inactive_ratio(void)
  5025. {
  5026. struct zone *zone;
  5027. for_each_zone(zone)
  5028. calculate_zone_inactive_ratio(zone);
  5029. }
  5030. /*
  5031. * Initialise min_free_kbytes.
  5032. *
  5033. * For small machines we want it small (128k min). For large machines
  5034. * we want it large (64MB max). But it is not linear, because network
  5035. * bandwidth does not increase linearly with machine size. We use
  5036. *
  5037. * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
  5038. * min_free_kbytes = sqrt(lowmem_kbytes * 16)
  5039. *
  5040. * which yields
  5041. *
  5042. * 16MB: 512k
  5043. * 32MB: 724k
  5044. * 64MB: 1024k
  5045. * 128MB: 1448k
  5046. * 256MB: 2048k
  5047. * 512MB: 2896k
  5048. * 1024MB: 4096k
  5049. * 2048MB: 5792k
  5050. * 4096MB: 8192k
  5051. * 8192MB: 11584k
  5052. * 16384MB: 16384k
  5053. */
  5054. int __meminit init_per_zone_wmark_min(void)
  5055. {
  5056. unsigned long lowmem_kbytes;
  5057. int new_min_free_kbytes;
  5058. lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
  5059. new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
  5060. if (new_min_free_kbytes > user_min_free_kbytes) {
  5061. min_free_kbytes = new_min_free_kbytes;
  5062. if (min_free_kbytes < 128)
  5063. min_free_kbytes = 128;
  5064. if (min_free_kbytes > 65536)
  5065. min_free_kbytes = 65536;
  5066. } else {
  5067. pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
  5068. new_min_free_kbytes, user_min_free_kbytes);
  5069. }
  5070. setup_per_zone_wmarks();
  5071. refresh_zone_stat_thresholds();
  5072. setup_per_zone_lowmem_reserve();
  5073. setup_per_zone_inactive_ratio();
  5074. return 0;
  5075. }
  5076. module_init(init_per_zone_wmark_min)
  5077. /*
  5078. * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
  5079. * that we can call two helper functions whenever min_free_kbytes
  5080. * changes.
  5081. */
  5082. int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
  5083. void __user *buffer, size_t *length, loff_t *ppos)
  5084. {
  5085. int rc;
  5086. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5087. if (rc)
  5088. return rc;
  5089. if (write) {
  5090. user_min_free_kbytes = min_free_kbytes;
  5091. setup_per_zone_wmarks();
  5092. }
  5093. return 0;
  5094. }
  5095. #ifdef CONFIG_NUMA
  5096. int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
  5097. void __user *buffer, size_t *length, loff_t *ppos)
  5098. {
  5099. struct zone *zone;
  5100. int rc;
  5101. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5102. if (rc)
  5103. return rc;
  5104. for_each_zone(zone)
  5105. zone->min_unmapped_pages = (zone->managed_pages *
  5106. sysctl_min_unmapped_ratio) / 100;
  5107. return 0;
  5108. }
  5109. int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
  5110. void __user *buffer, size_t *length, loff_t *ppos)
  5111. {
  5112. struct zone *zone;
  5113. int rc;
  5114. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5115. if (rc)
  5116. return rc;
  5117. for_each_zone(zone)
  5118. zone->min_slab_pages = (zone->managed_pages *
  5119. sysctl_min_slab_ratio) / 100;
  5120. return 0;
  5121. }
  5122. #endif
  5123. /*
  5124. * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
  5125. * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
  5126. * whenever sysctl_lowmem_reserve_ratio changes.
  5127. *
  5128. * The reserve ratio obviously has absolutely no relation with the
  5129. * minimum watermarks. The lowmem reserve ratio can only make sense
5130. * as a function of the boot-time zone sizes.
  5131. */
  5132. int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
  5133. void __user *buffer, size_t *length, loff_t *ppos)
  5134. {
  5135. proc_dointvec_minmax(table, write, buffer, length, ppos);
  5136. setup_per_zone_lowmem_reserve();
  5137. return 0;
  5138. }
  5139. /*
  5140. * percpu_pagelist_fraction - changes the pcp->high for each zone on each
  5141. * cpu. It is the fraction of total pages in each zone that a hot per cpu
5142. * pagelist can have before it gets flushed back to the buddy allocator.
  5143. */
  5144. int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
  5145. void __user *buffer, size_t *length, loff_t *ppos)
  5146. {
  5147. struct zone *zone;
  5148. int old_percpu_pagelist_fraction;
  5149. int ret;
  5150. mutex_lock(&pcp_batch_high_lock);
  5151. old_percpu_pagelist_fraction = percpu_pagelist_fraction;
  5152. ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5153. if (!write || ret < 0)
  5154. goto out;
  5155. /* Sanity checking to avoid pcp imbalance */
  5156. if (percpu_pagelist_fraction &&
  5157. percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
  5158. percpu_pagelist_fraction = old_percpu_pagelist_fraction;
  5159. ret = -EINVAL;
  5160. goto out;
  5161. }
  5162. /* No change? */
  5163. if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
  5164. goto out;
  5165. for_each_populated_zone(zone) {
  5166. unsigned int cpu;
  5167. for_each_possible_cpu(cpu)
  5168. pageset_set_high_and_batch(zone,
  5169. per_cpu_ptr(zone->pageset, cpu));
  5170. }
  5171. out:
  5172. mutex_unlock(&pcp_batch_high_lock);
  5173. return ret;
  5174. }
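/*
 * Example, assuming 4 KiB pages: writing 8 to percpu_pagelist_fraction on
 * a zone with 262144 managed pages sets each CPU's pcp->high to 32768
 * pages, with pcp->batch clamped to PAGE_SHIFT * 8 == 96 by
 * pageset_set_high().
 */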
  5175. int hashdist = HASHDIST_DEFAULT;
  5176. #ifdef CONFIG_NUMA
  5177. static int __init set_hashdist(char *str)
  5178. {
  5179. if (!str)
  5180. return 0;
  5181. hashdist = simple_strtoul(str, &str, 0);
  5182. return 1;
  5183. }
  5184. __setup("hashdist=", set_hashdist);
  5185. #endif

/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long long max = high_limit;
	unsigned long log2qty, size;
	void *table = NULL;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;

		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SHIFT < 20)
			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY)
			table = memblock_virt_alloc_nopanic(size, 0);
		else if (hashdist)
			table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
		else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of hash table which
			 * alloc_pages_exact() automatically does
			 */
			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, GFP_ATOMIC);
				kmemleak_alloc(table, size, 1, GFP_ATOMIC);
			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
	       tablename,
	       (1UL << log2qty),
	       ilog2(size) - PAGE_SHIFT,
	       size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
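
/*
 * Illustrative sketch of a typical caller (assumption: modelled on how
 * fs/dcache.c and fs/inode.c size their boot-time hash tables; the
 * "example_" names are hypothetical):
 *
 *	static struct hlist_head *example_hashtable;
 *	static unsigned int example_hash_shift;
 *	static unsigned int example_hash_mask;
 *
 *	static void __init example_hash_init(void)
 *	{
 *		example_hashtable =
 *			alloc_large_system_hash("Example-cache",
 *						sizeof(struct hlist_head),
 *						0,	// size from memory
 *						14,	// 1 bucket per 16KB
 *						HASH_EARLY,
 *						&example_hash_shift,
 *						&example_hash_mask,
 *						0, 0);
 *	}
 *
 * With numentries == 0 the table is sized from nr_kernel_pages, scaled to
 * one bucket per 2^14 bytes, rounded up to a power of two and capped at
 * 1/16th of memory unless high_limit says otherwise.
 */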

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
					unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	zone = page_zone(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	struct zone *zone;
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

	zone = page_zone(page);
	bitmap = get_pageblock_bitmap(zone, pfn);
	bitidx = pfn_to_bitidx(zone, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = ACCESS_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}
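
/*
 * Illustrative sketch (assumes the wrapper macros from
 * include/linux/pageblock-flags.h; the bit positions shown are those of the
 * usual migratetype group):
 *
 *	// read the migratetype recorded for the pageblock containing @page
 *	int mt = get_pfnblock_flags_mask(page, page_to_pfn(page),
 *					 PB_migrate_end, MIGRATETYPE_MASK);
 *
 *	// retag the whole pageblock, e.g. when isolating it
 *	set_pageblock_migratetype(page, MIGRATE_ISOLATE);
 *
 * The cmpxchg() loop above makes the read-modify-write of the shared bitmap
 * word safe against concurrent updates of neighbouring pageblocks.
 */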

/*
 * This function checks whether the pageblock includes unmovable pages or not.
 * If @count is not zero, it is okay to include up to @count unmovable pages.
 *
 * A PageLRU check without isolation or lru_lock could race so that a
 * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
 * expect this function to be exact.
 */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 bool skip_hwpoisoned_pages)
{
	unsigned long pfn, iter, found;
	int mt;

	/*
	 * To avoid noisy data, lru_add_drain_all() should be called first.
	 * A ZONE_MOVABLE zone never contains unmovable pages.
	 */
	if (zone_idx(zone) == ZONE_MOVABLE)
		return false;
	mt = get_pageblock_migratetype(page);
	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
		return false;

	pfn = page_to_pfn(page);
	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
		unsigned long check = pfn + iter;

		if (!pfn_valid_within(check))
			continue;

		page = pfn_to_page(check);

		/*
		 * Hugepages are not in LRU lists, but they're movable.
		 * We need not scan over tail pages because we don't
		 * handle each tail page individually in migration.
		 */
		if (PageHuge(page)) {
			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
			continue;
		}

		/*
		 * We can't use page_count() without pinning the page
		 * because another CPU can free the compound page.
		 * This check already skips compound tails of THP
		 * because their page->_count is zero at all times.
		 */
		if (!atomic_read(&page->_count)) {
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
			continue;
		}

		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if (skip_hwpoisoned_pages && PageHWPoison(page))
			continue;

		if (!PageLRU(page))
			found++;
		/*
		 * If there are RECLAIMABLE pages, we need to check them.
		 * But for now memory offline itself doesn't call shrink_slab()
		 * and this still needs to be fixed.
		 */
		/*
		 * If the page is not RAM, page_count() should be 0.
		 * No further check is needed: this is a _used_, non-movable page.
		 *
		 * The problematic thing here is PG_reserved pages. PG_reserved
		 * is set on both memory hole pages and _used_ kernel
		 * pages at boot.
		 */
		if (found > count)
			return true;
	}
	return false;
}

bool is_pageblock_removable_nolock(struct page *page)
{
	struct zone *zone;
	unsigned long pfn;

	/*
	 * We have to be careful here because we are iterating over memory
	 * sections which are not zone aware so we might end up outside of
	 * the zone but still within the section.
	 * We have to take care about the node as well. If the node is offline
	 * its NODE_DATA will be NULL - see page_zone.
	 */
	if (!node_online(page_to_nid(page)))
		return false;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	if (!zone_spans_pfn(zone, pfn))
		return false;

	return !has_unmovable_pages(zone, page, 0, true);
}
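
/*
 * Illustrative sketch of how the check above is typically consumed
 * (assumption: loosely modelled on is_mem_section_removable() in
 * mm/memory_hotplug.c):
 *
 *	// walk a memory block one pageblock at a time; report it as
 *	// hot-removable only if every pageblock passes the check
 *	for (; page < end_page; page += pageblock_nr_pages) {
 *		if (!is_pageblock_removable_nolock(page))
 *			return false;
 *		cond_resched();
 *	}
 *	return true;
 */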

#ifdef CONFIG_CMA

static unsigned long pfn_max_align_down(unsigned long pfn)
{
	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
			     pageblock_nr_pages) - 1);
}

static unsigned long pfn_max_align_up(unsigned long pfn)
{
	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
				pageblock_nr_pages));
}

/* [start, end) must belong to a single zone. */
static int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end)
{
	/* This function is based on compact_zone() from compaction.c. */
	unsigned long nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;

	migrate_prep();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			pfn = isolate_migratepages_range(cc, pfn, end);
			if (!pfn) {
				ret = -EINTR;
				break;
			}
			tries = 0;
		} else if (++tries == 5) {
			ret = ret < 0 ? ret : -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							&cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
				    NULL, 0, cc->mode, MR_CMA);
	}
	if (ret < 0) {
		putback_movable_pages(&cc->migratepages);
		return ret;
	}
	return 0;
}

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned, however it's the caller's responsibility to guarantee that
 * we are the only thread that changes migrate type of pageblocks the
 * pages fall in.
 *
 * The PFN range must belong to a single zone.
 *
 * Returns zero on success or negative error code.  On success all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype)
{
	unsigned long outer_start, outer_end;
	int ret = 0, order;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is we mark all pageblocks in range as
	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, we align the range to the biggest of the two page sizes
	 * so that the page allocator won't try to merge buddies from
	 * different pageblocks and change MIGRATE_ISOLATE to some
	 * other migration type.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from the unaligned range (ie. the pages that
	 * we are interested in).  This will put all the pages in
	 * range back to the page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from the page
	 * allocator, removing them from the buddy system.  This way
	 * the page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to the page allocator so that the buddy can use them.
	 */

	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype,
				       false);
	if (ret)
		return ret;

	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret)
		goto done;

	/*
	 * Pages from [start, end) are within MAX_ORDER_NR_PAGES
	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
	 * more, all pages in [start, end) are free in the page allocator.
	 * What we are going to do is to allocate all pages from
	 * [start, end) (that is, remove them from the page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of the interesting range may not be aligned with pages that
	 * the page allocator holds, ie. they can be part of higher order
	 * pages.  Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated thus they won't get removed from buddy.
	 */

	lru_add_drain_all();
	drain_all_pages(cc.zone);

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			ret = -EBUSY;
			goto done;
		}
		outer_start &= ~0UL << order;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, false)) {
		pr_info("%s: [%lx, %lx) PFNs busy\n",
			__func__, outer_start, end);
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	return ret;
}

void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
	unsigned int count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%d pages are still in use!\n", count);
}
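
/*
 * Illustrative sketch of a caller pair (assumption: modelled on the way
 * mm/cma.c's cma_alloc()/cma_release() use these two functions; the
 * "example_" name is hypothetical):
 *
 *	static struct page *example_grab_range(unsigned long pfn,
 *					       unsigned long nr_pages)
 *	{
 *		// all pageblocks in [pfn, pfn + nr_pages) carry MIGRATE_CMA
 *		if (alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA))
 *			return NULL;
 *		return pfn_to_page(pfn);
 *	}
 *
 * The matching release path is simply
 *
 *	free_contig_range(pfn, nr_pages);
 */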
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
	unsigned cpu;

	mutex_lock(&pcp_batch_high_lock);
	for_each_possible_cpu(cpu)
		pageset_set_high_and_batch(zone,
				per_cpu_ptr(zone->pageset, cpu));
	mutex_unlock(&pcp_batch_high_lock);
}
#endif

void zone_pcp_reset(struct zone *zone)
{
	unsigned long flags;
	int cpu;
	struct per_cpu_pageset *pset;

	/* avoid races with drain_pages()  */
	local_irq_save(flags);
	if (zone->pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pset = per_cpu_ptr(zone->pageset, cpu);
			drain_zonestat(zone, pset);
		}
		free_percpu(zone->pageset);
		zone->pageset = &boot_pageset;
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	unsigned int order, i;
	unsigned long pfn;
	unsigned long flags;

	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			SetPageReserved(page);
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		printk(KERN_INFO "remove from free list %lx %d %lx\n",
		       pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

#ifdef CONFIG_MEMORY_FAILURE
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
#endif
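
/*
 * Illustrative note (assumption: reflects how mm/memory-failure.c consumes
 * the helper above): when handling a hardware-poisoned page that appears to
 * have no users, memory_failure() uses is_free_buddy_page() to tell a page
 * already sitting in the buddy free lists (nothing to unmap or kill) from
 * one that is genuinely in use, roughly:
 *
 *	if (!page_count(p) && is_free_buddy_page(p)) {
 *		// page was free; just record it as poisoned and return
 *	}
 */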