/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/page_ext.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION    (8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);                /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
        [N_POSSIBLE] = NODE_MASK_ALL,
        [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
        [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
        [N_MEMORY] = { { [0] = 1UL } },
#endif
        [N_CPU] = { { [0] = 1UL } },
#endif  /* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
        return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
        page->index = migratetype;
}
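/*
 * Editorial illustration (not part of the original source): page->index is
 * otherwise unused once a page is free, so it doubles as scratch storage
 * here. For example, a page from a MIGRATE_CMA pageblock freed to a pcplist
 * sits on one of the MIGRATE_PCPTYPES lists (it has no pcplist of its own),
 * but set_pcppage_migratetype() records MIGRATE_CMA in page->index, so the
 * bulk-free path can later hand it back to the CMA free list without
 * re-reading the pageblock bitmap.
 */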
#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        if (saved_gfp_mask) {
                gfp_allowed_mask = saved_gfp_mask;
                saved_gfp_mask = 0;
        }
}

void pm_restrict_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        WARN_ON(saved_gfp_mask);
        saved_gfp_mask = gfp_allowed_mask;
        gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
        if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
                return false;
        return true;
}
#endif /* CONFIG_PM_SLEEP */
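/*
 * Editorial sketch (not original source, simplified): the expected pairing
 * from the suspend/hibernate core is roughly
 *
 *      mutex_lock(&pm_mutex);
 *      pm_restrict_gfp_mask();   (allocations can no longer start I/O)
 *      ... suspend devices, enter the sleep state, resume devices ...
 *      pm_restore_gfp_mask();    (I/O-backed allocations allowed again)
 *      mutex_unlock(&pm_mutex);
 *
 * The real call sites live in kernel/power/ and take pm_mutex via their own
 * helpers; the ordering above is an illustration only.
 */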
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *      1G machine -> (16M dma, 784M normal, 224M high)
 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
        256,
#endif
#ifdef CONFIG_ZONE_DMA32
        256,
#endif
#ifdef CONFIG_HIGHMEM
        32,
#endif
        32,
};
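/*
 * Worked numbers for the ratios above (editorial illustration): on the 1G
 * example machine, a NORMAL allocation leaves roughly 784M/256 ~= 3M of
 * ZONE_DMA untouched, a HIGHMEM allocation leaves (224M+784M)/256 ~= 4M of
 * ZONE_DMA and 224M/32 = 7M of ZONE_NORMAL untouched. Larger ratios mean
 * smaller reservations.
 */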
EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
        "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
        "DMA32",
#endif
        "Normal",
#ifdef CONFIG_HIGHMEM
        "HighMem",
#endif
        "Movable",
#ifdef CONFIG_ZONE_DEVICE
        "Device",
#endif
};

char * const migratetype_names[MIGRATE_TYPES] = {
        "Unmovable",
        "Movable",
        "Reclaimable",
        "HighAtomic",
#ifdef CONFIG_CMA
        "CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
        "Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
        NULL,
        free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
        free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
static bool mirrored_kernelcore;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
        pgdat->first_deferred_pfn = ULONG_MAX;
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
        if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
                return true;

        return false;
}

static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
{
        if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
                return true;

        return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
                                unsigned long pfn, unsigned long zone_end,
                                unsigned long *nr_initialised)
{
        unsigned long max_initialise;
        /* Always populate low zones for address-constrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;
        /*
         * Initialise at least 2G of a node, but also take into account the
         * two large system hashes that can take up 1GB for 0.25TB/node.
         */
        max_initialise = max(2UL << (30 - PAGE_SHIFT),
                (pgdat->node_spanned_pages >> 8));

        (*nr_initialised)++;
        if ((*nr_initialised > max_initialise) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                pgdat->first_deferred_pfn = pfn;
                return false;
        }

        return true;
}
#else
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
        return false;
}

static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
{
        return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
                                unsigned long pfn, unsigned long zone_end,
                                unsigned long *nr_initialised)
{
        return true;
}
#endif

void set_pageblock_migratetype(struct page *page, int migratetype)
{
        if (unlikely(page_group_by_mobility_disabled &&
                     migratetype < MIGRATE_PCPTYPES))
                migratetype = MIGRATE_UNMOVABLE;

        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
        int ret = 0;
        unsigned seq;
        unsigned long pfn = page_to_pfn(page);
        unsigned long sp, start_pfn;

        do {
                seq = zone_span_seqbegin(zone);
                start_pfn = zone->zone_start_pfn;
                sp = zone->spanned_pages;
                if (!zone_spans_pfn(zone, pfn))
                        ret = 1;
        } while (zone_span_seqretry(zone, seq));

        if (ret)
                pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
                        pfn, zone_to_nid(zone), zone->name,
                        start_pfn, start_pfn + sp);

        return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
        if (!pfn_valid_within(page_to_pfn(page)))
                return 0;
        if (zone != page_zone(page))
                return 0;

        return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
        if (page_outside_zone_boundaries(zone, page))
                return 1;
        if (!page_is_consistent(zone, page))
                return 1;

        return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
        return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
                unsigned long bad_flags)
{
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /* Don't complain about poisoned pages */
        if (PageHWPoison(page)) {
                page_mapcount_reset(page); /* remove PageBuddy */
                return;
        }

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        goto out;
                }
                if (nr_unshown) {
                        pr_alert(
                              "BUG: Bad page state: %lu messages suppressed\n",
                                nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
                current->comm, page_to_pfn(page));
        __dump_page(page, reason);
        bad_flags &= page->flags;
        if (bad_flags)
                pr_alert("bad because of flags: %#lx(%pGp)\n",
                                                bad_flags, &bad_flags);
        dump_page_owner(page);

        print_modules();
        dump_stack();
out:
        /* Leave bad fields for debug, except PageBuddy could make trouble */
        page_mapcount_reset(page); /* remove PageBuddy */
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset into the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */
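/*
 * Editorial illustration (not part of the original source): for an order-2
 * compound page made of pages p0..p3, prep_compound_page() below leaves the
 * pages looking roughly like this:
 *
 *      p0: PG_head set
 *      p1: compound_head = (unsigned long)p0 | 1, and carries the compound
 *          metadata (->compound_dtor, ->compound_order, compound_mapcount
 *          initialised to -1)
 *      p2, p3: compound_head = (unsigned long)p0 | 1
 *
 * so compound_head(pN) recovers p0 by clearing bit 0, and PageTail(pN) is
 * simply "bit 0 of ->compound_head is set".
 */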
void free_compound_page(struct page *page)
{
        __free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
        int i;
        int nr_pages = 1 << order;

        set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
                set_page_count(p, 0);
                p->mapping = TAIL_MAPPING;
                set_compound_head(p, page);
        }
        atomic_set(compound_mapcount_ptr(page), -1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly
                        = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled);
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
        if (!buf)
                return -EINVAL;

        if (strcmp(buf, "on") == 0)
                _debug_pagealloc_enabled = true;

        if (strcmp(buf, "off") == 0)
                _debug_pagealloc_enabled = false;

        return 0;
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
        /* If we don't use debug_pagealloc, we don't need guard page */
        if (!debug_pagealloc_enabled())
                return false;

        return true;
}

static void init_debug_guardpage(void)
{
        if (!debug_pagealloc_enabled())
                return;

        _debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
        .need = need_debug_guardpage,
        .init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
        unsigned long res;

        if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
                pr_err("Bad debug_guardpage_minorder value\n");
                return 0;
        }
        _debug_guardpage_minorder = res;
        pr_info("Setting debug_guardpage_minorder to %lu\n", res);
        return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype)
{
        struct page_ext *page_ext;

        if (!debug_guardpage_enabled())
                return;

        page_ext = lookup_page_ext(page);
        __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

        INIT_LIST_HEAD(&page->lru);
        set_page_private(page, order);
        /* Guard pages are not available for any usage */
        __mod_zone_freepage_state(zone, -(1 << order), migratetype);
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype)
{
        struct page_ext *page_ext;

        if (!debug_guardpage_enabled())
                return;

        page_ext = lookup_page_ext(page);
        __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

        set_page_private(page, 0);
        if (!is_migrate_isolate(migratetype))
                __mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops = { NULL, };
static inline void set_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype) {}
static inline void clear_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
        set_page_private(page, order);
        __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
        __ClearPageBuddy(page);
        set_page_private(page, 0);
}
/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * to PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording the page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                                        unsigned int order)
{
        if (!pfn_valid_within(page_to_pfn(buddy)))
                return 0;

        if (page_is_guard(buddy) && page_order(buddy) == order) {
                if (page_zone_id(page) != page_zone_id(buddy))
                        return 0;

                VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

                return 1;
        }

        if (PageBuddy(buddy) && page_order(buddy) == order) {
                /*
                 * zone check is done late to avoid uselessly
                 * calculating zone/node ids for pages that could
                 * never merge.
                 */
                if (page_zone_id(page) != page_zone_id(buddy))
                        return 0;

                VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

                return 1;
        }
        return 0;
}
/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of contiguous
 * free pages of length (1 << order) and marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. The page's order is recorded in the
 * page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
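/*
 * Editorial note (illustration, not original source): within a MAX_ORDER
 * aligned region, the buddy of the block starting at page_idx is found by
 * flipping the bit that corresponds to the block's order, and the merged
 * block starts at the lower of the two indices:
 *
 *      buddy_idx    = page_idx ^ (1 << order);
 *      combined_idx = buddy_idx & page_idx;
 *
 * e.g. an order-1 block at index 4 has its buddy at index 6, and merging the
 * two yields an order-2 block at index 4. __find_buddy_index(), used below,
 * is assumed to implement exactly this XOR (it lives in mm/internal.h).
 */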
static inline void __free_one_page(struct page *page,
                unsigned long pfn,
                struct zone *zone, unsigned int order,
                int migratetype)
{
        unsigned long page_idx;
        unsigned long combined_idx;
        unsigned long uninitialized_var(buddy_idx);
        struct page *buddy;
        unsigned int max_order;

        max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

        VM_BUG_ON(!zone_is_initialized(zone));
        VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

        VM_BUG_ON(migratetype == -1);
        if (likely(!is_migrate_isolate(migratetype)))
                __mod_zone_freepage_state(zone, 1 << order, migratetype);

        page_idx = pfn & ((1 << MAX_ORDER) - 1);

        VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
        VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
        while (order < max_order - 1) {
                buddy_idx = __find_buddy_index(page_idx, order);
                buddy = page + (buddy_idx - page_idx);
                if (!page_is_buddy(page, buddy, order))
                        goto done_merging;
                /*
                 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
                 * merge with it and move up one order.
                 */
                if (page_is_guard(buddy)) {
                        clear_page_guard(zone, buddy, order, migratetype);
                } else {
                        list_del(&buddy->lru);
                        zone->free_area[order].nr_free--;
                        rmv_page_order(buddy);
                }
                combined_idx = buddy_idx & page_idx;
                page = page + (combined_idx - page_idx);
                page_idx = combined_idx;
                order++;
        }
        if (max_order < MAX_ORDER) {
                /* If we are here, it means order is >= pageblock_order.
                 * We want to prevent merge between freepages on isolate
                 * pageblock and normal pageblock. Without this, pageblock
                 * isolation could cause incorrect freepage or CMA accounting.
                 *
                 * We don't want to hit this code for the more frequent
                 * low-order merging.
                 */
                if (unlikely(has_isolate_pageblock(zone))) {
                        int buddy_mt;

                        buddy_idx = __find_buddy_index(page_idx, order);
                        buddy = page + (buddy_idx - page_idx);
                        buddy_mt = get_pageblock_migratetype(buddy);

                        if (migratetype != buddy_mt
                                        && (is_migrate_isolate(migratetype) ||
                                                is_migrate_isolate(buddy_mt)))
                                goto done_merging;
                }
                max_order++;
                goto continue_merging;
        }
done_merging:
        set_page_order(page, order);

        /*
         * If this is not the largest possible page, check if the buddy
         * of the next-highest order is free. If it is, it's possible
         * that pages are being freed that will coalesce soon. In case
         * that is happening, add the free page to the tail of the list
         * so it's less likely to be used soon and more likely to be merged
         * as a higher order page.
         */
        if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
                struct page *higher_page, *higher_buddy;
                combined_idx = buddy_idx & page_idx;
                higher_page = page + (combined_idx - page_idx);
                buddy_idx = __find_buddy_index(combined_idx, order + 1);
                higher_buddy = higher_page + (buddy_idx - combined_idx);
                if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
                        list_add_tail(&page->lru,
                                &zone->free_area[order].free_list[migratetype]);
                        goto out;
                }
        }

        list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
        zone->free_area[order].nr_free++;
}
  684. /*
  685. * A bad page could be due to a number of fields. Instead of multiple branches,
  686. * try and check multiple fields with one check. The caller must do a detailed
  687. * check if necessary.
  688. */
  689. static inline bool page_expected_state(struct page *page,
  690. unsigned long check_flags)
  691. {
  692. if (unlikely(atomic_read(&page->_mapcount) != -1))
  693. return false;
  694. if (unlikely((unsigned long)page->mapping |
  695. page_ref_count(page) |
  696. #ifdef CONFIG_MEMCG
  697. (unsigned long)page->mem_cgroup |
  698. #endif
  699. (page->flags & check_flags)))
  700. return false;
  701. return true;
  702. }
  703. static void free_pages_check_bad(struct page *page)
  704. {
  705. const char *bad_reason;
  706. unsigned long bad_flags;
  707. bad_reason = NULL;
  708. bad_flags = 0;
  709. if (unlikely(atomic_read(&page->_mapcount) != -1))
  710. bad_reason = "nonzero mapcount";
  711. if (unlikely(page->mapping != NULL))
  712. bad_reason = "non-NULL mapping";
  713. if (unlikely(page_ref_count(page) != 0))
  714. bad_reason = "nonzero _refcount";
  715. if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
  716. bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
  717. bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
  718. }
  719. #ifdef CONFIG_MEMCG
  720. if (unlikely(page->mem_cgroup))
  721. bad_reason = "page still charged to cgroup";
  722. #endif
  723. bad_page(page, bad_reason, bad_flags);
  724. }
  725. static inline int free_pages_check(struct page *page)
  726. {
  727. if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
  728. return 0;
  729. /* Something has gone sideways, find it */
  730. free_pages_check_bad(page);
  731. return 1;
  732. }
  733. /*
  734. * Frees a number of pages from the PCP lists
  735. * Assumes all pages on list are in same zone, and of same order.
  736. * count is the number of pages to free.
  737. *
  738. * If the zone was previously in an "all pages pinned" state then look to
  739. * see if this freeing clears that state.
  740. *
  741. * And clear the zone's pages_scanned counter, to hold off the "all pages are
  742. * pinned" detection logic.
  743. */
  744. static void free_pcppages_bulk(struct zone *zone, int count,
  745. struct per_cpu_pages *pcp)
  746. {
  747. int migratetype = 0;
  748. int batch_free = 0;
  749. unsigned long nr_scanned;
  750. bool isolated_pageblocks;
  751. spin_lock(&zone->lock);
  752. isolated_pageblocks = has_isolate_pageblock(zone);
  753. nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
  754. if (nr_scanned)
  755. __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
  756. while (count) {
  757. struct page *page;
  758. struct list_head *list;
  759. /*
  760. * Remove pages from lists in a round-robin fashion. A
  761. * batch_free count is maintained that is incremented when an
  762. * empty list is encountered. This is so more pages are freed
  763. * off fuller lists instead of spinning excessively around empty
  764. * lists
  765. */
  766. do {
  767. batch_free++;
  768. if (++migratetype == MIGRATE_PCPTYPES)
  769. migratetype = 0;
  770. list = &pcp->lists[migratetype];
  771. } while (list_empty(list));
  772. /* This is the only non-empty list. Free them all. */
  773. if (batch_free == MIGRATE_PCPTYPES)
  774. batch_free = count;
  775. do {
  776. int mt; /* migratetype of the to-be-freed page */
  777. page = list_last_entry(list, struct page, lru);
778. /* must delete, as __free_one_page() manipulates the ->lru list */
  779. list_del(&page->lru);
  780. mt = get_pcppage_migratetype(page);
  781. /* MIGRATE_ISOLATE page should not go to pcplists */
  782. VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
  783. /* Pageblock could have been isolated meanwhile */
  784. if (unlikely(isolated_pageblocks))
  785. mt = get_pageblock_migratetype(page);
  786. __free_one_page(page, page_to_pfn(page), zone, 0, mt);
  787. trace_mm_page_pcpu_drain(page, 0, mt);
  788. } while (--count && --batch_free && !list_empty(list));
  789. }
  790. spin_unlock(&zone->lock);
  791. }
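/*
 * Free a single page of the given order straight back to the buddy
 * allocator under zone->lock. The migratetype is re-read from the
 * pageblock when isolation may have changed it in the meantime.
 */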
  792. static void free_one_page(struct zone *zone,
  793. struct page *page, unsigned long pfn,
  794. unsigned int order,
  795. int migratetype)
  796. {
  797. unsigned long nr_scanned;
  798. spin_lock(&zone->lock);
  799. nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
  800. if (nr_scanned)
  801. __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
  802. if (unlikely(has_isolate_pageblock(zone) ||
  803. is_migrate_isolate(migratetype))) {
  804. migratetype = get_pfnblock_migratetype(page, pfn);
  805. }
  806. __free_one_page(page, pfn, zone, order, migratetype);
  807. spin_unlock(&zone->lock);
  808. }
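/*
 * Validate one tail page of a compound page being freed. Returns 0 when
 * the tail looks sane (or CONFIG_DEBUG_VM is disabled), 1 after reporting
 * a bad page; ->mapping and the compound_head marker are cleared either way.
 */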
  809. static int free_tail_pages_check(struct page *head_page, struct page *page)
  810. {
  811. int ret = 1;
  812. /*
813. * We rely on page->lru.next never having bit 0 set, unless the page
  814. * is PageTail(). Let's make sure that's true even for poisoned ->lru.
  815. */
  816. BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
  817. if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
  818. ret = 0;
  819. goto out;
  820. }
  821. switch (page - head_page) {
  822. case 1:
  823. /* the first tail page: ->mapping is compound_mapcount() */
  824. if (unlikely(compound_mapcount(page))) {
  825. bad_page(page, "nonzero compound_mapcount", 0);
  826. goto out;
  827. }
  828. break;
  829. case 2:
  830. /*
  831. * the second tail page: ->mapping is
  832. * page_deferred_list().next -- ignore value.
  833. */
  834. break;
  835. default:
  836. if (page->mapping != TAIL_MAPPING) {
  837. bad_page(page, "corrupted mapping in tail page", 0);
  838. goto out;
  839. }
  840. break;
  841. }
  842. if (unlikely(!PageTail(page))) {
  843. bad_page(page, "PageTail not set", 0);
  844. goto out;
  845. }
  846. if (unlikely(compound_head(page) != head_page)) {
  847. bad_page(page, "compound_head not consistent", 0);
  848. goto out;
  849. }
  850. ret = 0;
  851. out:
  852. page->mapping = NULL;
  853. clear_compound_head(page);
  854. return ret;
  855. }
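/*
 * Perform the minimal initialisation of one struct page during early
 * memmap setup: link it to its zone and node, give it an initial
 * refcount, reset the mapcount and cpupid, and initialise ->lru.
 */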
  856. static void __meminit __init_single_page(struct page *page, unsigned long pfn,
  857. unsigned long zone, int nid)
  858. {
  859. set_page_links(page, zone, nid, pfn);
  860. init_page_count(page);
  861. page_mapcount_reset(page);
  862. page_cpupid_reset_last(page);
  863. INIT_LIST_HEAD(&page->lru);
  864. #ifdef WANT_PAGE_VIRTUAL
  865. /* The shift won't overflow because ZONE_NORMAL is below 4G. */
  866. if (!is_highmem_idx(zone))
  867. set_page_address(page, __va(pfn << PAGE_SHIFT));
  868. #endif
  869. }
  870. static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
  871. int nid)
  872. {
  873. return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
  874. }
  875. #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
  876. static void init_reserved_page(unsigned long pfn)
  877. {
  878. pg_data_t *pgdat;
  879. int nid, zid;
  880. if (!early_page_uninitialised(pfn))
  881. return;
  882. nid = early_pfn_to_nid(pfn);
  883. pgdat = NODE_DATA(nid);
  884. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  885. struct zone *zone = &pgdat->node_zones[zid];
  886. if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
  887. break;
  888. }
  889. __init_single_pfn(pfn, zid, nid);
  890. }
  891. #else
  892. static inline void init_reserved_page(unsigned long pfn)
  893. {
  894. }
  895. #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
  896. /*
  897. * Initialised pages do not have PageReserved set. This function is
  898. * called for each range allocated by the bootmem allocator and
  899. * marks the pages PageReserved. The remaining valid pages are later
  900. * sent to the buddy page allocator.
  901. */
  902. void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
  903. {
  904. unsigned long start_pfn = PFN_DOWN(start);
  905. unsigned long end_pfn = PFN_UP(end);
  906. for (; start_pfn < end_pfn; start_pfn++) {
  907. if (pfn_valid(start_pfn)) {
  908. struct page *page = pfn_to_page(start_pfn);
  909. init_reserved_page(start_pfn);
  910. /* Avoid false-positive PageTail() */
  911. INIT_LIST_HEAD(&page->lru);
  912. SetPageReserved(page);
  913. }
  914. }
  915. }
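/*
 * Strip state from a page (and, for compound allocations, its tail pages)
 * and run the free-time sanity checks before it goes back to the buddy
 * allocator. Returns false if corruption was detected, in which case the
 * caller skips the actual free.
 */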
  916. static bool free_pages_prepare(struct page *page, unsigned int order)
  917. {
  918. int bad = 0;
  919. VM_BUG_ON_PAGE(PageTail(page), page);
  920. trace_mm_page_free(page, order);
  921. kmemcheck_free_shadow(page, order);
  922. kasan_free_pages(page, order);
  923. /*
  924. * Check tail pages before head page information is cleared to
  925. * avoid checking PageCompound for order-0 pages.
  926. */
  927. if (unlikely(order)) {
  928. bool compound = PageCompound(page);
  929. int i;
  930. VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
  931. for (i = 1; i < (1 << order); i++) {
  932. if (compound)
  933. bad += free_tail_pages_check(page, page + i);
  934. if (unlikely(free_pages_check(page + i))) {
  935. bad++;
  936. continue;
  937. }
  938. (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
  939. }
  940. }
  941. if (PageAnonHead(page))
  942. page->mapping = NULL;
  943. bad += free_pages_check(page);
  944. if (bad)
  945. return false;
  946. page_cpupid_reset_last(page);
  947. page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
  948. reset_page_owner(page, order);
  949. if (!PageHighMem(page)) {
  950. debug_check_no_locks_freed(page_address(page),
  951. PAGE_SIZE << order);
  952. debug_check_no_obj_freed(page_address(page),
  953. PAGE_SIZE << order);
  954. }
  955. arch_free_page(page, order);
  956. kernel_poison_pages(page, 1 << order, 0);
  957. kernel_map_pages(page, 1 << order, 0);
  958. return true;
  959. }
  960. static void __free_pages_ok(struct page *page, unsigned int order)
  961. {
  962. unsigned long flags;
  963. int migratetype;
  964. unsigned long pfn = page_to_pfn(page);
  965. if (!free_pages_prepare(page, order))
  966. return;
  967. migratetype = get_pfnblock_migratetype(page, pfn);
  968. local_irq_save(flags);
  969. __count_vm_events(PGFREE, 1 << order);
  970. free_one_page(page_zone(page), page, pfn, order, migratetype);
  971. local_irq_restore(flags);
  972. }
  973. static void __init __free_pages_boot_core(struct page *page, unsigned int order)
  974. {
  975. unsigned int nr_pages = 1 << order;
  976. struct page *p = page;
  977. unsigned int loop;
  978. prefetchw(p);
  979. for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
  980. prefetchw(p + 1);
  981. __ClearPageReserved(p);
  982. set_page_count(p, 0);
  983. }
  984. __ClearPageReserved(p);
  985. set_page_count(p, 0);
  986. page_zone(page)->managed_pages += nr_pages;
  987. set_page_refcounted(page);
  988. __free_pages(page, order);
  989. }
  990. #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
  991. defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
  992. static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
  993. int __meminit early_pfn_to_nid(unsigned long pfn)
  994. {
  995. static DEFINE_SPINLOCK(early_pfn_lock);
  996. int nid;
  997. spin_lock(&early_pfn_lock);
  998. nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
  999. if (nid < 0)
  1000. nid = 0;
  1001. spin_unlock(&early_pfn_lock);
  1002. return nid;
  1003. }
  1004. #endif
  1005. #ifdef CONFIG_NODES_SPAN_OTHER_NODES
  1006. static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
  1007. struct mminit_pfnnid_cache *state)
  1008. {
  1009. int nid;
  1010. nid = __early_pfn_to_nid(pfn, state);
  1011. if (nid >= 0 && nid != node)
  1012. return false;
  1013. return true;
  1014. }
  1015. /* Only safe to use early in boot when initialisation is single-threaded */
  1016. static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
  1017. {
  1018. return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
  1019. }
  1020. #else
  1021. static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
  1022. {
  1023. return true;
  1024. }
  1025. static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
  1026. struct mminit_pfnnid_cache *state)
  1027. {
  1028. return true;
  1029. }
  1030. #endif
  1031. void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
  1032. unsigned int order)
  1033. {
  1034. if (early_page_uninitialised(pfn))
  1035. return;
  1036. return __free_pages_boot_core(page, order);
  1037. }
  1038. /*
  1039. * Check that the whole (or subset of) a pageblock given by the interval of
  1040. * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1041. * with the migration or free compaction scanner. The scanners then need to
  1042. * use only pfn_valid_within() check for arches that allow holes within
  1043. * pageblocks.
  1044. *
  1045. * Return struct page pointer of start_pfn, or NULL if checks were not passed.
  1046. *
  1047. * It's possible on some configurations to have a setup like node0 node1 node0
1048. * i.e. it's possible that all pages within a zone's range of pages do not
  1049. * belong to a single zone. We assume that a border between node0 and node1
  1050. * can occur within a single pageblock, but not a node0 node1 node0
  1051. * interleaving within a single pageblock. It is therefore sufficient to check
  1052. * the first and last page of a pageblock and avoid checking each individual
  1053. * page in a pageblock.
  1054. */
  1055. struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
  1056. unsigned long end_pfn, struct zone *zone)
  1057. {
  1058. struct page *start_page;
  1059. struct page *end_page;
  1060. /* end_pfn is one past the range we are checking */
  1061. end_pfn--;
  1062. if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
  1063. return NULL;
  1064. start_page = pfn_to_page(start_pfn);
  1065. if (page_zone(start_page) != zone)
  1066. return NULL;
  1067. end_page = pfn_to_page(end_pfn);
1068. /* This gives shorter code than deriving page_zone(end_page) */
  1069. if (page_zone_id(start_page) != page_zone_id(end_page))
  1070. return NULL;
  1071. return start_page;
  1072. }
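/*
 * Walk the zone one pageblock at a time with __pageblock_pfn_to_page() and
 * set zone->contiguous only if every block is valid and entirely within
 * the zone, i.e. the zone has no holes or foreign pageblocks.
 */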
  1073. void set_zone_contiguous(struct zone *zone)
  1074. {
  1075. unsigned long block_start_pfn = zone->zone_start_pfn;
  1076. unsigned long block_end_pfn;
  1077. block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
  1078. for (; block_start_pfn < zone_end_pfn(zone);
  1079. block_start_pfn = block_end_pfn,
  1080. block_end_pfn += pageblock_nr_pages) {
  1081. block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
  1082. if (!__pageblock_pfn_to_page(block_start_pfn,
  1083. block_end_pfn, zone))
  1084. return;
  1085. }
  1086. /* We confirm that there is no hole */
  1087. zone->contiguous = true;
  1088. }
  1089. void clear_zone_contiguous(struct zone *zone)
  1090. {
  1091. zone->contiguous = false;
  1092. }
  1093. #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
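/*
 * Hand a run of freshly initialised pages to the buddy allocator: as a
 * single MAX_ORDER-1 block when the run is a naturally aligned
 * MAX_ORDER_NR_PAGES chunk, otherwise one page at a time.
 */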
  1094. static void __init deferred_free_range(struct page *page,
  1095. unsigned long pfn, int nr_pages)
  1096. {
  1097. int i;
  1098. if (!page)
  1099. return;
  1100. /* Free a large naturally-aligned chunk if possible */
  1101. if (nr_pages == MAX_ORDER_NR_PAGES &&
  1102. (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
  1103. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  1104. __free_pages_boot_core(page, MAX_ORDER-1);
  1105. return;
  1106. }
  1107. for (i = 0; i < nr_pages; i++, page++)
  1108. __free_pages_boot_core(page, 0);
  1109. }
  1110. /* Completion tracking for deferred_init_memmap() threads */
  1111. static atomic_t pgdat_init_n_undone __initdata;
  1112. static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
  1113. static inline void __init pgdat_init_report_one_done(void)
  1114. {
  1115. if (atomic_dec_and_test(&pgdat_init_n_undone))
  1116. complete(&pgdat_init_all_done_comp);
  1117. }
  1118. /* Initialise remaining memory on a node */
  1119. static int __init deferred_init_memmap(void *data)
  1120. {
  1121. pg_data_t *pgdat = data;
  1122. int nid = pgdat->node_id;
  1123. struct mminit_pfnnid_cache nid_init_state = { };
  1124. unsigned long start = jiffies;
  1125. unsigned long nr_pages = 0;
  1126. unsigned long walk_start, walk_end;
  1127. int i, zid;
  1128. struct zone *zone;
  1129. unsigned long first_init_pfn = pgdat->first_deferred_pfn;
  1130. const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
  1131. if (first_init_pfn == ULONG_MAX) {
  1132. pgdat_init_report_one_done();
  1133. return 0;
  1134. }
  1135. /* Bind memory initialisation thread to a local node if possible */
  1136. if (!cpumask_empty(cpumask))
  1137. set_cpus_allowed_ptr(current, cpumask);
  1138. /* Sanity check boundaries */
  1139. BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
  1140. BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
  1141. pgdat->first_deferred_pfn = ULONG_MAX;
  1142. /* Only the highest zone is deferred so find it */
  1143. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  1144. zone = pgdat->node_zones + zid;
  1145. if (first_init_pfn < zone_end_pfn(zone))
  1146. break;
  1147. }
  1148. for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
  1149. unsigned long pfn, end_pfn;
  1150. struct page *page = NULL;
  1151. struct page *free_base_page = NULL;
  1152. unsigned long free_base_pfn = 0;
  1153. int nr_to_free = 0;
  1154. end_pfn = min(walk_end, zone_end_pfn(zone));
  1155. pfn = first_init_pfn;
  1156. if (pfn < walk_start)
  1157. pfn = walk_start;
  1158. if (pfn < zone->zone_start_pfn)
  1159. pfn = zone->zone_start_pfn;
  1160. for (; pfn < end_pfn; pfn++) {
  1161. if (!pfn_valid_within(pfn))
  1162. goto free_range;
  1163. /*
  1164. * Ensure pfn_valid is checked every
  1165. * MAX_ORDER_NR_PAGES for memory holes
  1166. */
  1167. if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
  1168. if (!pfn_valid(pfn)) {
  1169. page = NULL;
  1170. goto free_range;
  1171. }
  1172. }
  1173. if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
  1174. page = NULL;
  1175. goto free_range;
  1176. }
  1177. /* Minimise pfn page lookups and scheduler checks */
  1178. if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
  1179. page++;
  1180. } else {
  1181. nr_pages += nr_to_free;
  1182. deferred_free_range(free_base_page,
  1183. free_base_pfn, nr_to_free);
  1184. free_base_page = NULL;
  1185. free_base_pfn = nr_to_free = 0;
  1186. page = pfn_to_page(pfn);
  1187. cond_resched();
  1188. }
  1189. if (page->flags) {
  1190. VM_BUG_ON(page_zone(page) != zone);
  1191. goto free_range;
  1192. }
  1193. __init_single_page(page, pfn, zid, nid);
  1194. if (!free_base_page) {
  1195. free_base_page = page;
  1196. free_base_pfn = pfn;
  1197. nr_to_free = 0;
  1198. }
  1199. nr_to_free++;
  1200. /* Where possible, batch up pages for a single free */
  1201. continue;
  1202. free_range:
  1203. /* Free the current block of pages to allocator */
  1204. nr_pages += nr_to_free;
  1205. deferred_free_range(free_base_page, free_base_pfn,
  1206. nr_to_free);
  1207. free_base_page = NULL;
  1208. free_base_pfn = nr_to_free = 0;
  1209. }
  1210. first_init_pfn = max(end_pfn, first_init_pfn);
  1211. }
  1212. /* Sanity check that the next zone really is unpopulated */
  1213. WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
  1214. pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
  1215. jiffies_to_msecs(jiffies - start));
  1216. pgdat_init_report_one_done();
  1217. return 0;
  1218. }
  1219. #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
  1220. void __init page_alloc_init_late(void)
  1221. {
  1222. struct zone *zone;
  1223. #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
  1224. int nid;
  1225. /* There will be num_node_state(N_MEMORY) threads */
  1226. atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
  1227. for_each_node_state(nid, N_MEMORY) {
  1228. kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
  1229. }
  1230. /* Block until all are initialised */
  1231. wait_for_completion(&pgdat_init_all_done_comp);
  1232. /* Reinit limits that are based on free pages after the kernel is up */
  1233. files_maxfiles_init();
  1234. #endif
  1235. for_each_populated_zone(zone)
  1236. set_zone_contiguous(zone);
  1237. }
  1238. #ifdef CONFIG_CMA
  1239. /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
  1240. void __init init_cma_reserved_pageblock(struct page *page)
  1241. {
  1242. unsigned i = pageblock_nr_pages;
  1243. struct page *p = page;
  1244. do {
  1245. __ClearPageReserved(p);
  1246. set_page_count(p, 0);
  1247. } while (++p, --i);
  1248. set_pageblock_migratetype(page, MIGRATE_CMA);
  1249. if (pageblock_order >= MAX_ORDER) {
  1250. i = pageblock_nr_pages;
  1251. p = page;
  1252. do {
  1253. set_page_refcounted(p);
  1254. __free_pages(p, MAX_ORDER - 1);
  1255. p += MAX_ORDER_NR_PAGES;
  1256. } while (i -= MAX_ORDER_NR_PAGES);
  1257. } else {
  1258. set_page_refcounted(page);
  1259. __free_pages(page, pageblock_order);
  1260. }
  1261. adjust_managed_page_count(page, pageblock_nr_pages);
  1262. }
  1263. #endif
  1264. /*
  1265. * The order of subdivision here is critical for the IO subsystem.
  1266. * Please do not alter this order without good reasons and regression
  1267. * testing. Specifically, as large blocks of memory are subdivided,
  1268. * the order in which smaller blocks are delivered depends on the order
  1269. * they're subdivided in this function. This is the primary factor
  1270. * influencing the order in which pages are delivered to the IO
  1271. * subsystem according to empirical testing, and this is also justified
  1272. * by considering the behavior of a buddy system containing a single
  1273. * large block of memory acted on by a series of small allocations.
  1274. * This behavior is a critical factor in sglist merging's success.
  1275. *
  1276. * -- nyc
  1277. */
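/*
 * A concrete sketch of the split below: if an order-3 block is used to
 * satisfy an order-0 request (high = 3, low = 0), the loop peels off the
 * upper order-2 half (page[4]), then an order-1 half (page[2]), then an
 * order-0 buddy (page[1]), putting each on the matching free list (or
 * marking it as a guard page), while the caller keeps page[0].
 */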
  1278. static inline void expand(struct zone *zone, struct page *page,
  1279. int low, int high, struct free_area *area,
  1280. int migratetype)
  1281. {
  1282. unsigned long size = 1 << high;
  1283. while (high > low) {
  1284. area--;
  1285. high--;
  1286. size >>= 1;
  1287. VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
  1288. if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
  1289. debug_guardpage_enabled() &&
  1290. high < debug_guardpage_minorder()) {
  1291. /*
1292. * Mark as a guard page (or pages) so that it can be merged back
1293. * into the allocator when its buddy is freed. The corresponding
1294. * page table entries are not touched; the pages simply stay
1295. * not present in the virtual address space.
  1296. */
  1297. set_page_guard(zone, &page[size], high, migratetype);
  1298. continue;
  1299. }
  1300. list_add(&page[size].lru, &area->free_list[migratetype]);
  1301. area->nr_free++;
  1302. set_page_order(&page[size], high);
  1303. }
  1304. }
  1305. /*
  1306. * This page is about to be returned from the page allocator
  1307. */
  1308. static inline int check_new_page(struct page *page)
  1309. {
  1310. const char *bad_reason;
  1311. unsigned long bad_flags;
  1312. if (page_expected_state(page, PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))
  1313. return 0;
  1314. bad_reason = NULL;
  1315. bad_flags = 0;
  1316. if (unlikely(atomic_read(&page->_mapcount) != -1))
  1317. bad_reason = "nonzero mapcount";
  1318. if (unlikely(page->mapping != NULL))
  1319. bad_reason = "non-NULL mapping";
  1320. if (unlikely(page_ref_count(page) != 0))
  1321. bad_reason = "nonzero _count";
  1322. if (unlikely(page->flags & __PG_HWPOISON)) {
  1323. bad_reason = "HWPoisoned (hardware-corrupted)";
  1324. bad_flags = __PG_HWPOISON;
  1325. }
  1326. if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
  1327. bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
  1328. bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
  1329. }
  1330. #ifdef CONFIG_MEMCG
  1331. if (unlikely(page->mem_cgroup))
  1332. bad_reason = "page still charged to cgroup";
  1333. #endif
  1334. if (unlikely(bad_reason)) {
  1335. bad_page(page, bad_reason, bad_flags);
  1336. return 1;
  1337. }
  1338. return 0;
  1339. }
  1340. static inline bool free_pages_prezeroed(bool poisoned)
  1341. {
  1342. return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
  1343. page_poisoning_enabled() && poisoned;
  1344. }
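/*
 * Prepare a page that has just been taken off the free lists: run the
 * allocation-time checks on every constituent page, set the refcount,
 * zero it for __GFP_ZERO callers (unless poisoning already left it
 * zeroed), build a compound page for __GFP_COMP, and record whether it
 * was an ALLOC_NO_WATERMARKS (pfmemalloc) allocation.
 */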
  1345. static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
  1346. unsigned int alloc_flags)
  1347. {
  1348. int i;
  1349. bool poisoned = true;
  1350. for (i = 0; i < (1 << order); i++) {
  1351. struct page *p = page + i;
  1352. if (unlikely(check_new_page(p)))
  1353. return 1;
  1354. if (poisoned)
  1355. poisoned &= page_is_poisoned(p);
  1356. }
  1357. set_page_private(page, 0);
  1358. set_page_refcounted(page);
  1359. arch_alloc_page(page, order);
  1360. kernel_map_pages(page, 1 << order, 1);
  1361. kernel_poison_pages(page, 1 << order, 1);
  1362. kasan_alloc_pages(page, order);
  1363. if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
  1364. for (i = 0; i < (1 << order); i++)
  1365. clear_highpage(page + i);
  1366. if (order && (gfp_flags & __GFP_COMP))
  1367. prep_compound_page(page, order);
  1368. set_page_owner(page, order, gfp_flags);
  1369. /*
  1370. * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
  1371. * allocate the page. The expectation is that the caller is taking
  1372. * steps that will free more memory. The caller should avoid the page
  1373. * being used for !PFMEMALLOC purposes.
  1374. */
  1375. if (alloc_flags & ALLOC_NO_WATERMARKS)
  1376. set_page_pfmemalloc(page);
  1377. else
  1378. clear_page_pfmemalloc(page);
  1379. return 0;
  1380. }
  1381. /*
  1382. * Go through the free lists for the given migratetype and remove
  1383. * the smallest available page from the freelists
  1384. */
  1385. static inline
  1386. struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  1387. int migratetype)
  1388. {
  1389. unsigned int current_order;
  1390. struct free_area *area;
  1391. struct page *page;
  1392. /* Find a page of the appropriate size in the preferred list */
  1393. for (current_order = order; current_order < MAX_ORDER; ++current_order) {
  1394. area = &(zone->free_area[current_order]);
  1395. page = list_first_entry_or_null(&area->free_list[migratetype],
  1396. struct page, lru);
  1397. if (!page)
  1398. continue;
  1399. list_del(&page->lru);
  1400. rmv_page_order(page);
  1401. area->nr_free--;
  1402. expand(zone, page, order, current_order, area, migratetype);
  1403. set_pcppage_migratetype(page, migratetype);
  1404. return page;
  1405. }
  1406. return NULL;
  1407. }
  1408. /*
1409. * This array describes the order in which free lists are fallen back on
1410. * when the free lists for the desired migratetype are depleted
  1411. */
  1412. static int fallbacks[MIGRATE_TYPES][4] = {
  1413. [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
  1414. [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
  1415. [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
  1416. #ifdef CONFIG_CMA
  1417. [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
  1418. #endif
  1419. #ifdef CONFIG_MEMORY_ISOLATION
  1420. [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
  1421. #endif
  1422. };
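/*
 * For example, a MIGRATE_UNMOVABLE request whose own free lists are empty
 * is offered MIGRATE_RECLAIMABLE blocks first, then MIGRATE_MOVABLE ones;
 * MIGRATE_TYPES terminates the walk in find_suitable_fallback(). CMA and
 * isolated pageblocks are never handed out as generic fallbacks.
 */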
  1423. #ifdef CONFIG_CMA
  1424. static struct page *__rmqueue_cma_fallback(struct zone *zone,
  1425. unsigned int order)
  1426. {
  1427. return __rmqueue_smallest(zone, order, MIGRATE_CMA);
  1428. }
  1429. #else
  1430. static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  1431. unsigned int order) { return NULL; }
  1432. #endif
  1433. /*
  1434. * Move the free pages in a range to the free lists of the requested type.
1435. * Note that start_page and end_page are not aligned on a pageblock
  1436. * boundary. If alignment is required, use move_freepages_block()
  1437. */
  1438. int move_freepages(struct zone *zone,
  1439. struct page *start_page, struct page *end_page,
  1440. int migratetype)
  1441. {
  1442. struct page *page;
  1443. unsigned int order;
  1444. int pages_moved = 0;
  1445. #ifndef CONFIG_HOLES_IN_ZONE
  1446. /*
  1447. * page_zone is not safe to call in this context when
  1448. * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
  1449. * anyway as we check zone boundaries in move_freepages_block().
  1450. * Remove at a later date when no bug reports exist related to
  1451. * grouping pages by mobility
  1452. */
  1453. VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
  1454. #endif
  1455. for (page = start_page; page <= end_page;) {
  1456. /* Make sure we are not inadvertently changing nodes */
  1457. VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
  1458. if (!pfn_valid_within(page_to_pfn(page))) {
  1459. page++;
  1460. continue;
  1461. }
  1462. if (!PageBuddy(page)) {
  1463. page++;
  1464. continue;
  1465. }
  1466. order = page_order(page);
  1467. list_move(&page->lru,
  1468. &zone->free_area[order].free_list[migratetype]);
  1469. page += 1 << order;
  1470. pages_moved += 1 << order;
  1471. }
  1472. return pages_moved;
  1473. }
  1474. int move_freepages_block(struct zone *zone, struct page *page,
  1475. int migratetype)
  1476. {
  1477. unsigned long start_pfn, end_pfn;
  1478. struct page *start_page, *end_page;
  1479. start_pfn = page_to_pfn(page);
  1480. start_pfn = start_pfn & ~(pageblock_nr_pages-1);
  1481. start_page = pfn_to_page(start_pfn);
  1482. end_page = start_page + pageblock_nr_pages - 1;
  1483. end_pfn = start_pfn + pageblock_nr_pages - 1;
  1484. /* Do not cross zone boundaries */
  1485. if (!zone_spans_pfn(zone, start_pfn))
  1486. start_page = page;
  1487. if (!zone_spans_pfn(zone, end_pfn))
  1488. return 0;
  1489. return move_freepages(zone, start_page, end_page, migratetype);
  1490. }
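/*
 * Stamp a new migratetype on every pageblock spanned by a page of
 * start_order; only meaningful when start_order >= pageblock_order.
 */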
  1491. static void change_pageblock_range(struct page *pageblock_page,
  1492. int start_order, int migratetype)
  1493. {
  1494. int nr_pageblocks = 1 << (start_order - pageblock_order);
  1495. while (nr_pageblocks--) {
  1496. set_pageblock_migratetype(pageblock_page, migratetype);
  1497. pageblock_page += pageblock_nr_pages;
  1498. }
  1499. }
  1500. /*
  1501. * When we are falling back to another migratetype during allocation, try to
  1502. * steal extra free pages from the same pageblocks to satisfy further
  1503. * allocations, instead of polluting multiple pageblocks.
  1504. *
  1505. * If we are stealing a relatively large buddy page, it is likely there will
  1506. * be more free pages in the pageblock, so try to steal them all. For
  1507. * reclaimable and unmovable allocations, we steal regardless of page size,
  1508. * as fragmentation caused by those allocations polluting movable pageblocks
  1509. * is worse than movable allocations stealing from unmovable and reclaimable
  1510. * pageblocks.
  1511. */
  1512. static bool can_steal_fallback(unsigned int order, int start_mt)
  1513. {
  1514. /*
1515. * This order check is intentional even though a more relaxed
1516. * order check follows below. The reason is that if this condition
1517. * is met we can steal the whole pageblock, whereas the check below
1518. * does not guarantee that; it is only a heuristic and could be
1519. * changed at any time.
  1520. */
  1521. if (order >= pageblock_order)
  1522. return true;
  1523. if (order >= pageblock_order / 2 ||
  1524. start_mt == MIGRATE_RECLAIMABLE ||
  1525. start_mt == MIGRATE_UNMOVABLE ||
  1526. page_group_by_mobility_disabled)
  1527. return true;
  1528. return false;
  1529. }
  1530. /*
1531. * This function implements the actual stealing behaviour. If the order is
1532. * large enough, we can steal the whole pageblock. If not, we first move the
1533. * free pages in this pageblock and check whether at least half of the pages
1534. * were moved. If so, we change the migratetype of the pageblock and
1535. * permanently use its pages as the requested migratetype in the future.
  1536. */
  1537. static void steal_suitable_fallback(struct zone *zone, struct page *page,
  1538. int start_type)
  1539. {
  1540. unsigned int current_order = page_order(page);
  1541. int pages;
  1542. /* Take ownership for orders >= pageblock_order */
  1543. if (current_order >= pageblock_order) {
  1544. change_pageblock_range(page, current_order, start_type);
  1545. return;
  1546. }
  1547. pages = move_freepages_block(zone, page, start_type);
  1548. /* Claim the whole block if over half of it is free */
  1549. if (pages >= (1 << (pageblock_order-1)) ||
  1550. page_group_by_mobility_disabled)
  1551. set_pageblock_migratetype(page, start_type);
  1552. }
  1553. /*
  1554. * Check whether there is a suitable fallback freepage with requested order.
  1555. * If only_stealable is true, this function returns fallback_mt only if
1556. * we can steal all the other free pages in the pageblock as well. This helps
1557. * to reduce fragmentation due to mixed migratetype pages in one pageblock.
  1558. */
  1559. int find_suitable_fallback(struct free_area *area, unsigned int order,
  1560. int migratetype, bool only_stealable, bool *can_steal)
  1561. {
  1562. int i;
  1563. int fallback_mt;
  1564. if (area->nr_free == 0)
  1565. return -1;
  1566. *can_steal = false;
  1567. for (i = 0;; i++) {
  1568. fallback_mt = fallbacks[migratetype][i];
  1569. if (fallback_mt == MIGRATE_TYPES)
  1570. break;
  1571. if (list_empty(&area->free_list[fallback_mt]))
  1572. continue;
  1573. if (can_steal_fallback(order, migratetype))
  1574. *can_steal = true;
  1575. if (!only_stealable)
  1576. return fallback_mt;
  1577. if (*can_steal)
  1578. return fallback_mt;
  1579. }
  1580. return -1;
  1581. }
  1582. /*
  1583. * Reserve a pageblock for exclusive use of high-order atomic allocations if
  1584. * there are no empty page blocks that contain a page with a suitable order
  1585. */
  1586. static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
  1587. unsigned int alloc_order)
  1588. {
  1589. int mt;
  1590. unsigned long max_managed, flags;
  1591. /*
  1592. * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
  1593. * Check is race-prone but harmless.
  1594. */
  1595. max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
  1596. if (zone->nr_reserved_highatomic >= max_managed)
  1597. return;
  1598. spin_lock_irqsave(&zone->lock, flags);
  1599. /* Recheck the nr_reserved_highatomic limit under the lock */
  1600. if (zone->nr_reserved_highatomic >= max_managed)
  1601. goto out_unlock;
  1602. /* Yoink! */
  1603. mt = get_pageblock_migratetype(page);
  1604. if (mt != MIGRATE_HIGHATOMIC &&
  1605. !is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
  1606. zone->nr_reserved_highatomic += pageblock_nr_pages;
  1607. set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
  1608. move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
  1609. }
  1610. out_unlock:
  1611. spin_unlock_irqrestore(&zone->lock, flags);
  1612. }
  1613. /*
  1614. * Used when an allocation is about to fail under memory pressure. This
  1615. * potentially hurts the reliability of high-order allocations when under
  1616. * intense memory pressure but failed atomic allocations should be easier
  1617. * to recover from than an OOM.
  1618. */
  1619. static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
  1620. {
  1621. struct zonelist *zonelist = ac->zonelist;
  1622. unsigned long flags;
  1623. struct zoneref *z;
  1624. struct zone *zone;
  1625. struct page *page;
  1626. int order;
  1627. for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
  1628. ac->nodemask) {
  1629. /* Preserve at least one pageblock */
  1630. if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
  1631. continue;
  1632. spin_lock_irqsave(&zone->lock, flags);
  1633. for (order = 0; order < MAX_ORDER; order++) {
  1634. struct free_area *area = &(zone->free_area[order]);
  1635. page = list_first_entry_or_null(
  1636. &area->free_list[MIGRATE_HIGHATOMIC],
  1637. struct page, lru);
  1638. if (!page)
  1639. continue;
  1640. /*
  1641. * It should never happen but changes to locking could
  1642. * inadvertently allow a per-cpu drain to add pages
  1643. * to MIGRATE_HIGHATOMIC while unreserving so be safe
  1644. * and watch for underflows.
  1645. */
  1646. zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
  1647. zone->nr_reserved_highatomic);
  1648. /*
  1649. * Convert to ac->migratetype and avoid the normal
  1650. * pageblock stealing heuristics. Minimally, the caller
  1651. * is doing the work and needs the pages. More
  1652. * importantly, if the block was always converted to
  1653. * MIGRATE_UNMOVABLE or another type then the number
  1654. * of pageblocks that cannot be completely freed
  1655. * may increase.
  1656. */
  1657. set_pageblock_migratetype(page, ac->migratetype);
  1658. move_freepages_block(zone, page, ac->migratetype);
  1659. spin_unlock_irqrestore(&zone->lock, flags);
  1660. return;
  1661. }
  1662. spin_unlock_irqrestore(&zone->lock, flags);
  1663. }
  1664. }
  1665. /* Remove an element from the buddy allocator from the fallback list */
  1666. static inline struct page *
  1667. __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
  1668. {
  1669. struct free_area *area;
  1670. unsigned int current_order;
  1671. struct page *page;
  1672. int fallback_mt;
  1673. bool can_steal;
  1674. /* Find the largest possible block of pages in the other list */
  1675. for (current_order = MAX_ORDER-1;
  1676. current_order >= order && current_order <= MAX_ORDER-1;
  1677. --current_order) {
  1678. area = &(zone->free_area[current_order]);
  1679. fallback_mt = find_suitable_fallback(area, current_order,
  1680. start_migratetype, false, &can_steal);
  1681. if (fallback_mt == -1)
  1682. continue;
  1683. page = list_first_entry(&area->free_list[fallback_mt],
  1684. struct page, lru);
  1685. if (can_steal)
  1686. steal_suitable_fallback(zone, page, start_migratetype);
  1687. /* Remove the page from the freelists */
  1688. area->nr_free--;
  1689. list_del(&page->lru);
  1690. rmv_page_order(page);
  1691. expand(zone, page, order, current_order, area,
  1692. start_migratetype);
  1693. /*
  1694. * The pcppage_migratetype may differ from pageblock's
  1695. * migratetype depending on the decisions in
  1696. * find_suitable_fallback(). This is OK as long as it does not
  1697. * differ for MIGRATE_CMA pageblocks. Those can be used as
  1698. * fallback only via special __rmqueue_cma_fallback() function
  1699. */
  1700. set_pcppage_migratetype(page, start_migratetype);
  1701. trace_mm_page_alloc_extfrag(page, order, current_order,
  1702. start_migratetype, fallback_mt);
  1703. return page;
  1704. }
  1705. return NULL;
  1706. }
  1707. /*
  1708. * Do the hard work of removing an element from the buddy allocator.
  1709. * Call me with the zone->lock already held.
  1710. */
  1711. static struct page *__rmqueue(struct zone *zone, unsigned int order,
  1712. int migratetype)
  1713. {
  1714. struct page *page;
  1715. page = __rmqueue_smallest(zone, order, migratetype);
  1716. if (unlikely(!page)) {
  1717. if (migratetype == MIGRATE_MOVABLE)
  1718. page = __rmqueue_cma_fallback(zone, order);
  1719. if (!page)
  1720. page = __rmqueue_fallback(zone, order, migratetype);
  1721. }
  1722. trace_mm_page_alloc_zone_locked(page, order, migratetype);
  1723. return page;
  1724. }
  1725. /*
  1726. * Obtain a specified number of elements from the buddy allocator, all under
  1727. * a single hold of the lock, for efficiency. Add them to the supplied list.
  1728. * Returns the number of new pages which were placed at *list.
  1729. */
  1730. static int rmqueue_bulk(struct zone *zone, unsigned int order,
  1731. unsigned long count, struct list_head *list,
  1732. int migratetype, bool cold)
  1733. {
  1734. int i;
  1735. spin_lock(&zone->lock);
  1736. for (i = 0; i < count; ++i) {
  1737. struct page *page = __rmqueue(zone, order, migratetype);
  1738. if (unlikely(page == NULL))
  1739. break;
  1740. /*
  1741. * Split buddy pages returned by expand() are received here
1742. * in physical page order. The page is added to the caller's
1743. * list and the list head then moves forward. From the caller's
1744. * perspective, the linked list is ordered by page number in
  1745. * some conditions. This is useful for IO devices that can
  1746. * merge IO requests if the physical pages are ordered
  1747. * properly.
  1748. */
  1749. if (likely(!cold))
  1750. list_add(&page->lru, list);
  1751. else
  1752. list_add_tail(&page->lru, list);
  1753. list = &page->lru;
  1754. if (is_migrate_cma(get_pcppage_migratetype(page)))
  1755. __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
  1756. -(1 << order));
  1757. }
  1758. __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
  1759. spin_unlock(&zone->lock);
  1760. return i;
  1761. }
  1762. #ifdef CONFIG_NUMA
  1763. /*
  1764. * Called from the vmstat counter updater to drain pagesets of this
  1765. * currently executing processor on remote nodes after they have
  1766. * expired.
  1767. *
  1768. * Note that this function must be called with the thread pinned to
  1769. * a single processor.
  1770. */
  1771. void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
  1772. {
  1773. unsigned long flags;
  1774. int to_drain, batch;
  1775. local_irq_save(flags);
  1776. batch = READ_ONCE(pcp->batch);
  1777. to_drain = min(pcp->count, batch);
  1778. if (to_drain > 0) {
  1779. free_pcppages_bulk(zone, to_drain, pcp);
  1780. pcp->count -= to_drain;
  1781. }
  1782. local_irq_restore(flags);
  1783. }
  1784. #endif
  1785. /*
  1786. * Drain pcplists of the indicated processor and zone.
  1787. *
  1788. * The processor must either be the current processor and the
  1789. * thread pinned to the current processor or a processor that
  1790. * is not online.
  1791. */
  1792. static void drain_pages_zone(unsigned int cpu, struct zone *zone)
  1793. {
  1794. unsigned long flags;
  1795. struct per_cpu_pageset *pset;
  1796. struct per_cpu_pages *pcp;
  1797. local_irq_save(flags);
  1798. pset = per_cpu_ptr(zone->pageset, cpu);
  1799. pcp = &pset->pcp;
  1800. if (pcp->count) {
  1801. free_pcppages_bulk(zone, pcp->count, pcp);
  1802. pcp->count = 0;
  1803. }
  1804. local_irq_restore(flags);
  1805. }
  1806. /*
  1807. * Drain pcplists of all zones on the indicated processor.
  1808. *
  1809. * The processor must either be the current processor and the
  1810. * thread pinned to the current processor or a processor that
  1811. * is not online.
  1812. */
  1813. static void drain_pages(unsigned int cpu)
  1814. {
  1815. struct zone *zone;
  1816. for_each_populated_zone(zone) {
  1817. drain_pages_zone(cpu, zone);
  1818. }
  1819. }
  1820. /*
  1821. * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  1822. *
  1823. * The CPU has to be pinned. When zone parameter is non-NULL, spill just
  1824. * the single zone's pages.
  1825. */
  1826. void drain_local_pages(struct zone *zone)
  1827. {
  1828. int cpu = smp_processor_id();
  1829. if (zone)
  1830. drain_pages_zone(cpu, zone);
  1831. else
  1832. drain_pages(cpu);
  1833. }
  1834. /*
  1835. * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
  1836. *
  1837. * When zone parameter is non-NULL, spill just the single zone's pages.
  1838. *
  1839. * Note that this code is protected against sending an IPI to an offline
  1840. * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
  1841. * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
  1842. * nothing keeps CPUs from showing up after we populated the cpumask and
  1843. * before the call to on_each_cpu_mask().
  1844. */
  1845. void drain_all_pages(struct zone *zone)
  1846. {
  1847. int cpu;
  1848. /*
1849. * Allocate in the BSS so we won't require allocation in
  1850. * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
  1851. */
  1852. static cpumask_t cpus_with_pcps;
  1853. /*
1854. * We don't care about racing with CPU hotplug events,
1855. * as the offline notification will cause the notified
1856. * CPU to drain its own pcps, and on_each_cpu_mask()
1857. * disables preemption as part of its processing
  1858. */
  1859. for_each_online_cpu(cpu) {
  1860. struct per_cpu_pageset *pcp;
  1861. struct zone *z;
  1862. bool has_pcps = false;
  1863. if (zone) {
  1864. pcp = per_cpu_ptr(zone->pageset, cpu);
  1865. if (pcp->pcp.count)
  1866. has_pcps = true;
  1867. } else {
  1868. for_each_populated_zone(z) {
  1869. pcp = per_cpu_ptr(z->pageset, cpu);
  1870. if (pcp->pcp.count) {
  1871. has_pcps = true;
  1872. break;
  1873. }
  1874. }
  1875. }
  1876. if (has_pcps)
  1877. cpumask_set_cpu(cpu, &cpus_with_pcps);
  1878. else
  1879. cpumask_clear_cpu(cpu, &cpus_with_pcps);
  1880. }
  1881. on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
  1882. zone, 1);
  1883. }
  1884. #ifdef CONFIG_HIBERNATION
  1885. void mark_free_pages(struct zone *zone)
  1886. {
  1887. unsigned long pfn, max_zone_pfn;
  1888. unsigned long flags;
  1889. unsigned int order, t;
  1890. struct page *page;
  1891. if (zone_is_empty(zone))
  1892. return;
  1893. spin_lock_irqsave(&zone->lock, flags);
  1894. max_zone_pfn = zone_end_pfn(zone);
  1895. for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
  1896. if (pfn_valid(pfn)) {
  1897. page = pfn_to_page(pfn);
  1898. if (page_zone(page) != zone)
  1899. continue;
  1900. if (!swsusp_page_is_forbidden(page))
  1901. swsusp_unset_page_free(page);
  1902. }
  1903. for_each_migratetype_order(order, t) {
  1904. list_for_each_entry(page,
  1905. &zone->free_area[order].free_list[t], lru) {
  1906. unsigned long i;
  1907. pfn = page_to_pfn(page);
  1908. for (i = 0; i < (1UL << order); i++)
  1909. swsusp_set_page_free(pfn_to_page(pfn + i));
  1910. }
  1911. }
  1912. spin_unlock_irqrestore(&zone->lock, flags);
  1913. }
1914. #endif /* CONFIG_HIBERNATION */
  1915. /*
  1916. * Free a 0-order page
  1917. * cold == true ? free a cold page : free a hot page
  1918. */
  1919. void free_hot_cold_page(struct page *page, bool cold)
  1920. {
  1921. struct zone *zone = page_zone(page);
  1922. struct per_cpu_pages *pcp;
  1923. unsigned long flags;
  1924. unsigned long pfn = page_to_pfn(page);
  1925. int migratetype;
  1926. if (!free_pages_prepare(page, 0))
  1927. return;
  1928. migratetype = get_pfnblock_migratetype(page, pfn);
  1929. set_pcppage_migratetype(page, migratetype);
  1930. local_irq_save(flags);
  1931. __count_vm_event(PGFREE);
  1932. /*
  1933. * We only track unmovable, reclaimable and movable on pcp lists.
  1934. * Free ISOLATE pages back to the allocator because they are being
  1935. * offlined but treat RESERVE as movable pages so we can get those
  1936. * areas back if necessary. Otherwise, we may have to free
  1937. * excessively into the page allocator
  1938. */
  1939. if (migratetype >= MIGRATE_PCPTYPES) {
  1940. if (unlikely(is_migrate_isolate(migratetype))) {
  1941. free_one_page(zone, page, pfn, 0, migratetype);
  1942. goto out;
  1943. }
  1944. migratetype = MIGRATE_MOVABLE;
  1945. }
  1946. pcp = &this_cpu_ptr(zone->pageset)->pcp;
  1947. if (!cold)
  1948. list_add(&page->lru, &pcp->lists[migratetype]);
  1949. else
  1950. list_add_tail(&page->lru, &pcp->lists[migratetype]);
  1951. pcp->count++;
  1952. if (pcp->count >= pcp->high) {
  1953. unsigned long batch = READ_ONCE(pcp->batch);
  1954. free_pcppages_bulk(zone, batch, pcp);
  1955. pcp->count -= batch;
  1956. }
  1957. out:
  1958. local_irq_restore(flags);
  1959. }
  1960. /*
  1961. * Free a list of 0-order pages
  1962. */
  1963. void free_hot_cold_page_list(struct list_head *list, bool cold)
  1964. {
  1965. struct page *page, *next;
  1966. list_for_each_entry_safe(page, next, list, lru) {
  1967. trace_mm_page_free_batched(page, cold);
  1968. free_hot_cold_page(page, cold);
  1969. }
  1970. }
  1971. /*
  1972. * split_page takes a non-compound higher-order page, and splits it into
1973. * n (1 << order) sub-pages: page[0..n-1]
  1974. * Each sub-page must be freed individually.
  1975. *
  1976. * Note: this is probably too low level an operation for use in drivers.
  1977. * Please consult with lkml before using this in your driver.
  1978. */
  1979. void split_page(struct page *page, unsigned int order)
  1980. {
  1981. int i;
  1982. gfp_t gfp_mask;
  1983. VM_BUG_ON_PAGE(PageCompound(page), page);
  1984. VM_BUG_ON_PAGE(!page_count(page), page);
  1985. #ifdef CONFIG_KMEMCHECK
  1986. /*
  1987. * Split shadow pages too, because free(page[0]) would
  1988. * otherwise free the whole shadow.
  1989. */
  1990. if (kmemcheck_page_is_tracked(page))
  1991. split_page(virt_to_page(page[0].shadow), order);
  1992. #endif
  1993. gfp_mask = get_page_owner_gfp(page);
  1994. set_page_owner(page, 0, gfp_mask);
  1995. for (i = 1; i < (1 << order); i++) {
  1996. set_page_refcounted(page + i);
  1997. set_page_owner(page + i, 0, gfp_mask);
  1998. }
  1999. }
  2000. EXPORT_SYMBOL_GPL(split_page);
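/*
 * Detach a free (PageBuddy) page from the free lists for the caller,
 * obeying the low watermark unless the pageblock is already isolated.
 * Returns the number of base pages removed (1 << order), or 0 on failure.
 */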
  2001. int __isolate_free_page(struct page *page, unsigned int order)
  2002. {
  2003. unsigned long watermark;
  2004. struct zone *zone;
  2005. int mt;
  2006. BUG_ON(!PageBuddy(page));
  2007. zone = page_zone(page);
  2008. mt = get_pageblock_migratetype(page);
  2009. if (!is_migrate_isolate(mt)) {
  2010. /* Obey watermarks as if the page was being allocated */
  2011. watermark = low_wmark_pages(zone) + (1 << order);
  2012. if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
  2013. return 0;
  2014. __mod_zone_freepage_state(zone, -(1UL << order), mt);
  2015. }
  2016. /* Remove page from free list */
  2017. list_del(&page->lru);
  2018. zone->free_area[order].nr_free--;
  2019. rmv_page_order(page);
  2020. set_page_owner(page, order, __GFP_MOVABLE);
2021. /* Set the pageblock's migratetype if the isolated page is at least half of a pageblock */
  2022. if (order >= pageblock_order - 1) {
  2023. struct page *endpage = page + (1 << order) - 1;
  2024. for (; page < endpage; page += pageblock_nr_pages) {
  2025. int mt = get_pageblock_migratetype(page);
  2026. if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
  2027. set_pageblock_migratetype(page,
  2028. MIGRATE_MOVABLE);
  2029. }
  2030. }
  2031. return 1UL << order;
  2032. }
  2033. /*
  2034. * Similar to split_page except the page is already free. As this is only
  2035. * being used for migration, the migratetype of the block also changes.
  2036. * As this is called with interrupts disabled, the caller is responsible
2037. * for calling arch_alloc_page() and kernel_map_pages() after interrupts
  2038. * are enabled.
  2039. *
  2040. * Note: this is probably too low level an operation for use in drivers.
  2041. * Please consult with lkml before using this in your driver.
  2042. */
  2043. int split_free_page(struct page *page)
  2044. {
  2045. unsigned int order;
  2046. int nr_pages;
  2047. order = page_order(page);
  2048. nr_pages = __isolate_free_page(page, order);
  2049. if (!nr_pages)
  2050. return 0;
  2051. /* Split into individual pages */
  2052. set_page_refcounted(page);
  2053. split_page(page, order);
  2054. return nr_pages;
  2055. }
  2056. /*
  2057. * Update NUMA hit/miss statistics
  2058. *
  2059. * Must be called with interrupts disabled.
  2060. *
  2061. * When __GFP_OTHER_NODE is set assume the node of the preferred
  2062. * zone is the local node. This is useful for daemons who allocate
  2063. * memory on behalf of other processes.
  2064. */
  2065. static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
  2066. gfp_t flags)
  2067. {
  2068. #ifdef CONFIG_NUMA
  2069. int local_nid = numa_node_id();
  2070. enum zone_stat_item local_stat = NUMA_LOCAL;
  2071. if (unlikely(flags & __GFP_OTHER_NODE)) {
  2072. local_stat = NUMA_OTHER;
  2073. local_nid = preferred_zone->node;
  2074. }
  2075. if (z->node == local_nid) {
  2076. __inc_zone_state(z, NUMA_HIT);
  2077. __inc_zone_state(z, local_stat);
  2078. } else {
  2079. __inc_zone_state(z, NUMA_MISS);
  2080. __inc_zone_state(preferred_zone, NUMA_FOREIGN);
  2081. }
  2082. #endif
  2083. }
  2084. /*
  2085. * Allocate a page from the given zone. Use pcplists for order-0 allocations.
  2086. */
  2087. static inline
  2088. struct page *buffered_rmqueue(struct zone *preferred_zone,
  2089. struct zone *zone, unsigned int order,
  2090. gfp_t gfp_flags, unsigned int alloc_flags,
  2091. int migratetype)
  2092. {
  2093. unsigned long flags;
  2094. struct page *page;
  2095. bool cold = ((gfp_flags & __GFP_COLD) != 0);
  2096. if (likely(order == 0)) {
  2097. struct per_cpu_pages *pcp;
  2098. struct list_head *list;
  2099. local_irq_save(flags);
  2100. pcp = &this_cpu_ptr(zone->pageset)->pcp;
  2101. list = &pcp->lists[migratetype];
  2102. if (list_empty(list)) {
  2103. pcp->count += rmqueue_bulk(zone, 0,
  2104. pcp->batch, list,
  2105. migratetype, cold);
  2106. if (unlikely(list_empty(list)))
  2107. goto failed;
  2108. }
  2109. if (cold)
  2110. page = list_last_entry(list, struct page, lru);
  2111. else
  2112. page = list_first_entry(list, struct page, lru);
  2113. __dec_zone_state(zone, NR_ALLOC_BATCH);
  2114. list_del(&page->lru);
  2115. pcp->count--;
  2116. } else {
  2117. /*
  2118. * We most definitely don't want callers attempting to
  2119. * allocate greater than order-1 page units with __GFP_NOFAIL.
  2120. */
  2121. WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
  2122. spin_lock_irqsave(&zone->lock, flags);
  2123. page = NULL;
  2124. if (alloc_flags & ALLOC_HARDER) {
  2125. page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
  2126. if (page)
  2127. trace_mm_page_alloc_zone_locked(page, order, migratetype);
  2128. }
  2129. if (!page)
  2130. page = __rmqueue(zone, order, migratetype);
  2131. spin_unlock(&zone->lock);
  2132. if (!page)
  2133. goto failed;
  2134. __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
  2135. __mod_zone_freepage_state(zone, -(1 << order),
  2136. get_pcppage_migratetype(page));
  2137. }
  2138. if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
  2139. !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
  2140. set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
  2141. __count_zone_vm_events(PGALLOC, zone, 1 << order);
  2142. zone_statistics(preferred_zone, zone, gfp_flags);
  2143. local_irq_restore(flags);
  2144. VM_BUG_ON_PAGE(bad_range(zone, page), page);
  2145. return page;
  2146. failed:
  2147. local_irq_restore(flags);
  2148. return NULL;
  2149. }
  2150. #ifdef CONFIG_FAIL_PAGE_ALLOC
  2151. static struct {
  2152. struct fault_attr attr;
  2153. bool ignore_gfp_highmem;
  2154. bool ignore_gfp_reclaim;
  2155. u32 min_order;
  2156. } fail_page_alloc = {
  2157. .attr = FAULT_ATTR_INITIALIZER,
  2158. .ignore_gfp_reclaim = true,
  2159. .ignore_gfp_highmem = true,
  2160. .min_order = 1,
  2161. };
  2162. static int __init setup_fail_page_alloc(char *str)
  2163. {
  2164. return setup_fault_attr(&fail_page_alloc.attr, str);
  2165. }
  2166. __setup("fail_page_alloc=", setup_fail_page_alloc);
  2167. static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  2168. {
  2169. if (order < fail_page_alloc.min_order)
  2170. return false;
  2171. if (gfp_mask & __GFP_NOFAIL)
  2172. return false;
  2173. if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
  2174. return false;
  2175. if (fail_page_alloc.ignore_gfp_reclaim &&
  2176. (gfp_mask & __GFP_DIRECT_RECLAIM))
  2177. return false;
  2178. return should_fail(&fail_page_alloc.attr, 1 << order);
  2179. }
  2180. #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
  2181. static int __init fail_page_alloc_debugfs(void)
  2182. {
  2183. umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
  2184. struct dentry *dir;
  2185. dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
  2186. &fail_page_alloc.attr);
  2187. if (IS_ERR(dir))
  2188. return PTR_ERR(dir);
  2189. if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
  2190. &fail_page_alloc.ignore_gfp_reclaim))
  2191. goto fail;
  2192. if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
  2193. &fail_page_alloc.ignore_gfp_highmem))
  2194. goto fail;
  2195. if (!debugfs_create_u32("min-order", mode, dir,
  2196. &fail_page_alloc.min_order))
  2197. goto fail;
  2198. return 0;
  2199. fail:
  2200. debugfs_remove_recursive(dir);
  2201. return -ENOMEM;
  2202. }
  2203. late_initcall(fail_page_alloc_debugfs);
  2204. #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
  2205. #else /* CONFIG_FAIL_PAGE_ALLOC */
  2206. static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  2207. {
  2208. return false;
  2209. }
  2210. #endif /* CONFIG_FAIL_PAGE_ALLOC */
  2211. /*
  2212. * Return true if free base pages are above 'mark'. For high-order checks it
2213. * will return true if the order-0 watermark is reached and there is at least
  2214. * one free page of a suitable size. Checking now avoids taking the zone lock
  2215. * to check in the allocation paths if no pages are free.
  2216. */
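/*
 * Worked example of the arithmetic below, with purely illustrative
 * numbers: for mark = 128, ALLOC_HIGH halves min to 64 and ALLOC_HARDER
 * then drops another quarter, leaving 48; an order-2 request also
 * pre-subtracts (1 << 2) - 1 = 3 pages from free_pages before the
 * order-0 comparison against min + lowmem_reserve is made.
 */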
  2217. static bool __zone_watermark_ok(struct zone *z, unsigned int order,
  2218. unsigned long mark, int classzone_idx,
  2219. unsigned int alloc_flags,
  2220. long free_pages)
  2221. {
  2222. long min = mark;
  2223. int o;
  2224. const bool alloc_harder = (alloc_flags & ALLOC_HARDER);
  2225. /* free_pages may go negative - that's OK */
  2226. free_pages -= (1 << order) - 1;
  2227. if (alloc_flags & ALLOC_HIGH)
  2228. min -= min / 2;
  2229. /*
  2230. * If the caller does not have rights to ALLOC_HARDER then subtract
  2231. * the high-atomic reserves. This will over-estimate the size of the
  2232. * atomic reserve but it avoids a search.
  2233. */
  2234. if (likely(!alloc_harder))
  2235. free_pages -= z->nr_reserved_highatomic;
  2236. else
  2237. min -= min / 4;
  2238. #ifdef CONFIG_CMA
  2239. /* If allocation can't use CMA areas don't use free CMA pages */
  2240. if (!(alloc_flags & ALLOC_CMA))
  2241. free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
  2242. #endif
  2243. /*
  2244. * Check watermarks for an order-0 allocation request. If these
  2245. * are not met, then a high-order request also cannot go ahead
  2246. * even if a suitable page happened to be free.
  2247. */
  2248. if (free_pages <= min + z->lowmem_reserve[classzone_idx])
  2249. return false;
  2250. /* If this is an order-0 request then the watermark is fine */
  2251. if (!order)
  2252. return true;
  2253. /* For a high-order request, check at least one suitable page is free */
  2254. for (o = order; o < MAX_ORDER; o++) {
  2255. struct free_area *area = &z->free_area[o];
  2256. int mt;
  2257. if (!area->nr_free)
  2258. continue;
  2259. if (alloc_harder)
  2260. return true;
  2261. for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
  2262. if (!list_empty(&area->free_list[mt]))
  2263. return true;
  2264. }
  2265. #ifdef CONFIG_CMA
  2266. if ((alloc_flags & ALLOC_CMA) &&
  2267. !list_empty(&area->free_list[MIGRATE_CMA])) {
  2268. return true;
  2269. }
  2270. #endif
  2271. }
  2272. return false;
  2273. }
  2274. bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
  2275. int classzone_idx, unsigned int alloc_flags)
  2276. {
  2277. return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
  2278. zone_page_state(z, NR_FREE_PAGES));
  2279. }
  2280. static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
  2281. unsigned long mark, int classzone_idx, unsigned int alloc_flags)
  2282. {
  2283. long free_pages = zone_page_state(z, NR_FREE_PAGES);
  2284. long cma_pages = 0;
  2285. #ifdef CONFIG_CMA
  2286. /* If allocation can't use CMA areas don't use free CMA pages */
  2287. if (!(alloc_flags & ALLOC_CMA))
  2288. cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
  2289. #endif
/*
 * Fast check for order-0 only. If this fails then the reserves
 * need to be calculated. There is a corner case where the check
 * passes but only the high-order atomic reserves are free. If
 * the caller is !atomic then it'll uselessly search the free
 * list. That corner case is then slower, but it is harmless.
 */
  2297. if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
  2298. return true;
  2299. return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
  2300. free_pages);
  2301. }
  2302. bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
  2303. unsigned long mark, int classzone_idx)
  2304. {
  2305. long free_pages = zone_page_state(z, NR_FREE_PAGES);
  2306. if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
  2307. free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
  2308. return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
  2309. free_pages);
  2310. }
  2311. #ifdef CONFIG_NUMA
  2312. static bool zone_local(struct zone *local_zone, struct zone *zone)
  2313. {
  2314. return local_zone->node == zone->node;
  2315. }
  2316. static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  2317. {
  2318. return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
  2319. RECLAIM_DISTANCE;
  2320. }
  2321. #else /* CONFIG_NUMA */
  2322. static bool zone_local(struct zone *local_zone, struct zone *zone)
  2323. {
  2324. return true;
  2325. }
  2326. static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  2327. {
  2328. return true;
  2329. }
  2330. #endif /* CONFIG_NUMA */
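/*
 * Editorial note (hedged): with the common defaults of LOCAL_DISTANCE == 10
 * and RECLAIM_DISTANCE == 30, zone_allows_reclaim() lets the allocator try
 * zone_reclaim() on the local node and on nearby nodes, while zones on more
 * distant nodes are simply skipped in favour of the next zone in the
 * zonelist. Architectures may override these distance constants.
 */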
  2331. static void reset_alloc_batches(struct zone *preferred_zone)
  2332. {
  2333. struct zone *zone = preferred_zone->zone_pgdat->node_zones;
  2334. do {
  2335. mod_zone_page_state(zone, NR_ALLOC_BATCH,
  2336. high_wmark_pages(zone) - low_wmark_pages(zone) -
  2337. atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
  2338. clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
  2339. } while (zone++ != preferred_zone);
  2340. }
  2341. /*
  2342. * get_page_from_freelist goes through the zonelist trying to allocate
  2343. * a page.
  2344. */
  2345. static struct page *
  2346. get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
  2347. const struct alloc_context *ac)
  2348. {
  2349. struct zoneref *z = ac->preferred_zoneref;
  2350. struct zone *zone;
  2351. bool fair_skipped = false;
  2352. bool apply_fair = (alloc_flags & ALLOC_FAIR);
  2353. zonelist_scan:
  2354. /*
  2355. * Scan zonelist, looking for a zone with enough free.
  2356. * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
  2357. */
  2358. for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
  2359. ac->nodemask) {
  2360. struct page *page;
  2361. unsigned long mark;
  2362. if (cpusets_enabled() &&
  2363. (alloc_flags & ALLOC_CPUSET) &&
  2364. !cpuset_zone_allowed(zone, gfp_mask))
  2365. continue;
  2366. /*
  2367. * Distribute pages in proportion to the individual
  2368. * zone size to ensure fair page aging. The zone a
  2369. * page was allocated in should have no effect on the
  2370. * time the page has in memory before being reclaimed.
  2371. */
  2372. if (apply_fair) {
  2373. if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
  2374. fair_skipped = true;
  2375. continue;
  2376. }
  2377. if (!zone_local(ac->preferred_zoneref->zone, zone)) {
  2378. if (fair_skipped)
  2379. goto reset_fair;
  2380. apply_fair = false;
  2381. }
  2382. }
  2383. /*
  2384. * When allocating a page cache page for writing, we
  2385. * want to get it from a zone that is within its dirty
  2386. * limit, such that no single zone holds more than its
  2387. * proportional share of globally allowed dirty pages.
  2388. * The dirty limits take into account the zone's
  2389. * lowmem reserves and high watermark so that kswapd
  2390. * should be able to balance it without having to
  2391. * write pages from its LRU list.
  2392. *
  2393. * This may look like it could increase pressure on
  2394. * lower zones by failing allocations in higher zones
  2395. * before they are full. But the pages that do spill
  2396. * over are limited as the lower zones are protected
  2397. * by this very same mechanism. It should not become
  2398. * a practical burden to them.
  2399. *
  2400. * XXX: For now, allow allocations to potentially
  2401. * exceed the per-zone dirty limit in the slowpath
  2402. * (spread_dirty_pages unset) before going into reclaim,
  2403. * which is important when on a NUMA setup the allowed
  2404. * zones are together not big enough to reach the
  2405. * global limit. The proper fix for these situations
  2406. * will require awareness of zones in the
  2407. * dirty-throttling and the flusher threads.
  2408. */
  2409. if (ac->spread_dirty_pages && !zone_dirty_ok(zone))
  2410. continue;
  2411. mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
  2412. if (!zone_watermark_fast(zone, order, mark,
  2413. ac_classzone_idx(ac), alloc_flags)) {
  2414. int ret;
  2415. /* Checked here to keep the fast path fast */
  2416. BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
  2417. if (alloc_flags & ALLOC_NO_WATERMARKS)
  2418. goto try_this_zone;
  2419. if (zone_reclaim_mode == 0 ||
  2420. !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
  2421. continue;
  2422. ret = zone_reclaim(zone, gfp_mask, order);
  2423. switch (ret) {
  2424. case ZONE_RECLAIM_NOSCAN:
  2425. /* did not scan */
  2426. continue;
  2427. case ZONE_RECLAIM_FULL:
  2428. /* scanned but unreclaimable */
  2429. continue;
  2430. default:
  2431. /* did we reclaim enough */
  2432. if (zone_watermark_ok(zone, order, mark,
  2433. ac_classzone_idx(ac), alloc_flags))
  2434. goto try_this_zone;
  2435. continue;
  2436. }
  2437. }
  2438. try_this_zone:
  2439. page = buffered_rmqueue(ac->preferred_zoneref->zone, zone, order,
  2440. gfp_mask, alloc_flags, ac->migratetype);
  2441. if (page) {
  2442. if (prep_new_page(page, order, gfp_mask, alloc_flags))
  2443. goto try_this_zone;
  2444. /*
  2445. * If this is a high-order atomic allocation then check
  2446. * if the pageblock should be reserved for the future
  2447. */
  2448. if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
  2449. reserve_highatomic_pageblock(page, zone, order);
  2450. return page;
  2451. }
  2452. }
  2453. /*
  2454. * The first pass makes sure allocations are spread fairly within the
  2455. * local node. However, the local node might have free pages left
  2456. * after the fairness batches are exhausted, and remote zones haven't
  2457. * even been considered yet. Try once more without fairness, and
  2458. * include remote zones now, before entering the slowpath and waking
  2459. * kswapd: prefer spilling to a remote zone over swapping locally.
  2460. */
  2461. if (fair_skipped) {
  2462. reset_fair:
  2463. apply_fair = false;
  2464. fair_skipped = false;
  2465. reset_alloc_batches(ac->preferred_zoneref->zone);
  2466. goto zonelist_scan;
  2467. }
  2468. return NULL;
  2469. }
  2470. /*
  2471. * Large machines with many possible nodes should not always dump per-node
  2472. * meminfo in irq context.
  2473. */
  2474. static inline bool should_suppress_show_mem(void)
  2475. {
  2476. bool ret = false;
  2477. #if NODES_SHIFT > 8
  2478. ret = in_interrupt();
  2479. #endif
  2480. return ret;
  2481. }
  2482. static DEFINE_RATELIMIT_STATE(nopage_rs,
  2483. DEFAULT_RATELIMIT_INTERVAL,
  2484. DEFAULT_RATELIMIT_BURST);
  2485. void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
  2486. {
  2487. unsigned int filter = SHOW_MEM_FILTER_NODES;
  2488. if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
  2489. debug_guardpage_minorder() > 0)
  2490. return;
  2491. /*
  2492. * This documents exceptions given to allocations in certain
  2493. * contexts that are allowed to allocate outside current's set
  2494. * of allowed nodes.
  2495. */
  2496. if (!(gfp_mask & __GFP_NOMEMALLOC))
  2497. if (test_thread_flag(TIF_MEMDIE) ||
  2498. (current->flags & (PF_MEMALLOC | PF_EXITING)))
  2499. filter &= ~SHOW_MEM_FILTER_NODES;
  2500. if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
  2501. filter &= ~SHOW_MEM_FILTER_NODES;
  2502. if (fmt) {
  2503. struct va_format vaf;
  2504. va_list args;
  2505. va_start(args, fmt);
  2506. vaf.fmt = fmt;
  2507. vaf.va = &args;
  2508. pr_warn("%pV", &vaf);
  2509. va_end(args);
  2510. }
  2511. pr_warn("%s: page allocation failure: order:%u, mode:%#x(%pGg)\n",
  2512. current->comm, order, gfp_mask, &gfp_mask);
  2513. dump_stack();
  2514. if (!should_suppress_show_mem())
  2515. show_mem(filter);
  2516. }
  2517. static inline struct page *
  2518. __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
  2519. const struct alloc_context *ac, unsigned long *did_some_progress)
  2520. {
  2521. struct oom_control oc = {
  2522. .zonelist = ac->zonelist,
  2523. .nodemask = ac->nodemask,
  2524. .gfp_mask = gfp_mask,
  2525. .order = order,
  2526. };
  2527. struct page *page;
  2528. *did_some_progress = 0;
  2529. /*
  2530. * Acquire the oom lock. If that fails, somebody else is
  2531. * making progress for us.
  2532. */
  2533. if (!mutex_trylock(&oom_lock)) {
  2534. *did_some_progress = 1;
  2535. schedule_timeout_uninterruptible(1);
  2536. return NULL;
  2537. }
/*
 * Go through the zonelist yet one more time, keeping a very high watermark
 * here; this is only to catch a parallel OOM killing, and we must fail if
 * we're still under heavy pressure.
 */
  2543. page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
  2544. ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
  2545. if (page)
  2546. goto out;
  2547. if (!(gfp_mask & __GFP_NOFAIL)) {
  2548. /* Coredumps can quickly deplete all memory reserves */
  2549. if (current->flags & PF_DUMPCORE)
  2550. goto out;
  2551. /* The OOM killer will not help higher order allocs */
  2552. if (order > PAGE_ALLOC_COSTLY_ORDER)
  2553. goto out;
  2554. /* The OOM killer does not needlessly kill tasks for lowmem */
  2555. if (ac->high_zoneidx < ZONE_NORMAL)
  2556. goto out;
  2557. if (pm_suspended_storage())
  2558. goto out;
/*
 * XXX: GFP_NOFS allocations should rather fail than rely on
 * other requests to make forward progress.
 * We are in an unfortunate situation where out_of_memory cannot
 * do much for this context, but let's try it to at least get
 * access to memory reserves if the current task is killed (see
 * out_of_memory). Once filesystems are ready to handle allocation
 * failures more gracefully we should just bail out here.
 */
  2568. /* The OOM killer may not free memory on a specific node */
  2569. if (gfp_mask & __GFP_THISNODE)
  2570. goto out;
  2571. }
  2572. /* Exhausted what can be done so it's blamo time */
  2573. if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
  2574. *did_some_progress = 1;
  2575. if (gfp_mask & __GFP_NOFAIL) {
  2576. page = get_page_from_freelist(gfp_mask, order,
  2577. ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac);
  2578. /*
  2579. * fallback to ignore cpuset restriction if our nodes
  2580. * are depleted
  2581. */
  2582. if (!page)
  2583. page = get_page_from_freelist(gfp_mask, order,
  2584. ALLOC_NO_WATERMARKS, ac);
  2585. }
  2586. }
  2587. out:
  2588. mutex_unlock(&oom_lock);
  2589. return page;
  2590. }
  2591. #ifdef CONFIG_COMPACTION
  2592. /* Try memory compaction for high-order allocations before reclaim */
  2593. static struct page *
  2594. __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
  2595. unsigned int alloc_flags, const struct alloc_context *ac,
  2596. enum migrate_mode mode, int *contended_compaction,
  2597. bool *deferred_compaction)
  2598. {
  2599. unsigned long compact_result;
  2600. struct page *page;
  2601. if (!order)
  2602. return NULL;
  2603. current->flags |= PF_MEMALLOC;
  2604. compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
  2605. mode, contended_compaction);
  2606. current->flags &= ~PF_MEMALLOC;
  2607. switch (compact_result) {
  2608. case COMPACT_DEFERRED:
  2609. *deferred_compaction = true;
  2610. /* fall-through */
  2611. case COMPACT_SKIPPED:
  2612. return NULL;
  2613. default:
  2614. break;
  2615. }
  2616. /*
  2617. * At least in one zone compaction wasn't deferred or skipped, so let's
  2618. * count a compaction stall
  2619. */
  2620. count_vm_event(COMPACTSTALL);
  2621. page = get_page_from_freelist(gfp_mask, order,
  2622. alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
  2623. if (page) {
  2624. struct zone *zone = page_zone(page);
  2625. zone->compact_blockskip_flush = false;
  2626. compaction_defer_reset(zone, order, true);
  2627. count_vm_event(COMPACTSUCCESS);
  2628. return page;
  2629. }
/*
 * It's bad if a compaction run occurs and fails. The most likely
 * reason is that pages exist, but not enough to satisfy watermarks.
 */
  2634. count_vm_event(COMPACTFAIL);
  2635. cond_resched();
  2636. return NULL;
  2637. }
  2638. #else
  2639. static inline struct page *
  2640. __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
  2641. unsigned int alloc_flags, const struct alloc_context *ac,
  2642. enum migrate_mode mode, int *contended_compaction,
  2643. bool *deferred_compaction)
  2644. {
  2645. return NULL;
  2646. }
  2647. #endif /* CONFIG_COMPACTION */
  2648. /* Perform direct synchronous page reclaim */
  2649. static int
  2650. __perform_reclaim(gfp_t gfp_mask, unsigned int order,
  2651. const struct alloc_context *ac)
  2652. {
  2653. struct reclaim_state reclaim_state;
  2654. int progress;
  2655. cond_resched();
  2656. /* We now go into synchronous reclaim */
  2657. cpuset_memory_pressure_bump();
  2658. current->flags |= PF_MEMALLOC;
  2659. lockdep_set_current_reclaim_state(gfp_mask);
  2660. reclaim_state.reclaimed_slab = 0;
  2661. current->reclaim_state = &reclaim_state;
  2662. progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
  2663. ac->nodemask);
  2664. current->reclaim_state = NULL;
  2665. lockdep_clear_current_reclaim_state();
  2666. current->flags &= ~PF_MEMALLOC;
  2667. cond_resched();
  2668. return progress;
  2669. }
  2670. /* The really slow allocator path where we enter direct reclaim */
  2671. static inline struct page *
  2672. __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
  2673. unsigned int alloc_flags, const struct alloc_context *ac,
  2674. unsigned long *did_some_progress)
  2675. {
  2676. struct page *page = NULL;
  2677. bool drained = false;
  2678. *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
  2679. if (unlikely(!(*did_some_progress)))
  2680. return NULL;
  2681. retry:
  2682. page = get_page_from_freelist(gfp_mask, order,
  2683. alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
/*
 * If an allocation failed after direct reclaim, it could be because
 * pages are pinned on the per-cpu lists or in high alloc reserves.
 * Shrink them and try again.
 */
  2689. if (!page && !drained) {
  2690. unreserve_highatomic_pageblock(ac);
  2691. drain_all_pages(NULL);
  2692. drained = true;
  2693. goto retry;
  2694. }
  2695. return page;
  2696. }
  2697. static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
  2698. {
  2699. struct zoneref *z;
  2700. struct zone *zone;
  2701. for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
  2702. ac->high_zoneidx, ac->nodemask)
  2703. wakeup_kswapd(zone, order, ac_classzone_idx(ac));
  2704. }
  2705. static inline unsigned int
  2706. gfp_to_alloc_flags(gfp_t gfp_mask)
  2707. {
  2708. unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
  2709. /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
  2710. BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
/*
 * The caller may dip into page reserves a bit more if it cannot run
 * direct reclaim, has a realtime scheduling policy, or is asking for
 * __GFP_HIGH memory. GFP_ATOMIC requests will set both ALLOC_HARDER
 * (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
 */
  2717. alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
  2718. if (gfp_mask & __GFP_ATOMIC) {
  2719. /*
  2720. * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
  2721. * if it can't schedule.
  2722. */
  2723. if (!(gfp_mask & __GFP_NOMEMALLOC))
  2724. alloc_flags |= ALLOC_HARDER;
  2725. /*
  2726. * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
  2727. * comment for __cpuset_node_allowed().
  2728. */
  2729. alloc_flags &= ~ALLOC_CPUSET;
  2730. } else if (unlikely(rt_task(current)) && !in_interrupt())
  2731. alloc_flags |= ALLOC_HARDER;
  2732. if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
  2733. if (gfp_mask & __GFP_MEMALLOC)
  2734. alloc_flags |= ALLOC_NO_WATERMARKS;
  2735. else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
  2736. alloc_flags |= ALLOC_NO_WATERMARKS;
  2737. else if (!in_interrupt() &&
  2738. ((current->flags & PF_MEMALLOC) ||
  2739. unlikely(test_thread_flag(TIF_MEMDIE))))
  2740. alloc_flags |= ALLOC_NO_WATERMARKS;
  2741. }
  2742. #ifdef CONFIG_CMA
  2743. if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
  2744. alloc_flags |= ALLOC_CMA;
  2745. #endif
  2746. return alloc_flags;
  2747. }
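/*
 * Editorial worked example (a sketch, not authoritative): a GFP_ATOMIC
 * request sets __GFP_HIGH and __GFP_ATOMIC, so gfp_to_alloc_flags() above
 * starts from ALLOC_WMARK_MIN | ALLOC_CPUSET, adds ALLOC_HIGH for
 * __GFP_HIGH, adds ALLOC_HARDER for __GFP_ATOMIC (absent __GFP_NOMEMALLOC)
 * and clears ALLOC_CPUSET: the allocation may use the min watermark,
 * dip further into reserves and ignore cpuset restrictions.
 */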
  2748. bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
  2749. {
  2750. return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
  2751. }
  2752. static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
  2753. {
  2754. return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE;
  2755. }
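/*
 * Editorial note: the test above is true only when every bit of
 * GFP_TRANSHUGE is set in gfp_mask *and* __GFP_KSWAPD_RECLAIM is clear,
 * i.e. it singles out THP-style allocations that chose not to wake kswapd.
 */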
  2756. static inline struct page *
  2757. __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
  2758. struct alloc_context *ac)
  2759. {
  2760. bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
  2761. struct page *page = NULL;
  2762. unsigned int alloc_flags;
  2763. unsigned long pages_reclaimed = 0;
  2764. unsigned long did_some_progress;
  2765. enum migrate_mode migration_mode = MIGRATE_ASYNC;
  2766. bool deferred_compaction = false;
  2767. int contended_compaction = COMPACT_CONTENDED_NONE;
  2768. /*
  2769. * In the slowpath, we sanity check order to avoid ever trying to
  2770. * reclaim >= MAX_ORDER areas which will never succeed. Callers may
  2771. * be using allocators in order of preference for an area that is
  2772. * too large.
  2773. */
  2774. if (order >= MAX_ORDER) {
  2775. WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
  2776. return NULL;
  2777. }
  2778. /*
  2779. * We also sanity check to catch abuse of atomic reserves being used by
  2780. * callers that are not in atomic context.
  2781. */
  2782. if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
  2783. (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
  2784. gfp_mask &= ~__GFP_ATOMIC;
  2785. retry:
  2786. if (gfp_mask & __GFP_KSWAPD_RECLAIM)
  2787. wake_all_kswapds(order, ac);
  2788. /*
  2789. * OK, we're below the kswapd watermark and have kicked background
  2790. * reclaim. Now things get more complex, so set up alloc_flags according
  2791. * to how we want to proceed.
  2792. */
  2793. alloc_flags = gfp_to_alloc_flags(gfp_mask);
  2794. /* This is the last chance, in general, before the goto nopage. */
  2795. page = get_page_from_freelist(gfp_mask, order,
  2796. alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
  2797. if (page)
  2798. goto got_pg;
  2799. /* Allocate without watermarks if the context allows */
  2800. if (alloc_flags & ALLOC_NO_WATERMARKS) {
/*
 * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds that the
 * allocation is high priority and this type of allocation is system
 * rather than user oriented.
 */
  2806. ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
  2807. page = get_page_from_freelist(gfp_mask, order,
  2808. ALLOC_NO_WATERMARKS, ac);
  2809. if (page)
  2810. goto got_pg;
  2811. }
  2812. /* Caller is not willing to reclaim, we can't balance anything */
  2813. if (!can_direct_reclaim) {
  2814. /*
  2815. * All existing users of the __GFP_NOFAIL are blockable, so warn
  2816. * of any new users that actually allow this type of allocation
  2817. * to fail.
  2818. */
  2819. WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
  2820. goto nopage;
  2821. }
  2822. /* Avoid recursion of direct reclaim */
  2823. if (current->flags & PF_MEMALLOC) {
/*
 * A __GFP_NOFAIL request from this context is rather bizarre
 * because we cannot reclaim anything and can only loop waiting
 * for somebody else to do the work for us.
 */
  2829. if (WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
  2830. cond_resched();
  2831. goto retry;
  2832. }
  2833. goto nopage;
  2834. }
  2835. /* Avoid allocations with no watermarks from looping endlessly */
  2836. if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
  2837. goto nopage;
  2838. /*
  2839. * Try direct compaction. The first pass is asynchronous. Subsequent
  2840. * attempts after direct reclaim are synchronous
  2841. */
  2842. page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
  2843. migration_mode,
  2844. &contended_compaction,
  2845. &deferred_compaction);
  2846. if (page)
  2847. goto got_pg;
  2848. /* Checks for THP-specific high-order allocations */
  2849. if (is_thp_gfp_mask(gfp_mask)) {
  2850. /*
  2851. * If compaction is deferred for high-order allocations, it is
  2852. * because sync compaction recently failed. If this is the case
  2853. * and the caller requested a THP allocation, we do not want
  2854. * to heavily disrupt the system, so we fail the allocation
  2855. * instead of entering direct reclaim.
  2856. */
  2857. if (deferred_compaction)
  2858. goto nopage;
/*
 * In all zones where compaction was attempted (and not
 * deferred or skipped), lock contention has been detected.
 * For THP allocations we do not want to disrupt others,
 * so we fall back to base pages instead.
 */
  2865. if (contended_compaction == COMPACT_CONTENDED_LOCK)
  2866. goto nopage;
  2867. /*
  2868. * If compaction was aborted due to need_resched(), we do not
  2869. * want to further increase allocation latency, unless it is
  2870. * khugepaged trying to collapse.
  2871. */
  2872. if (contended_compaction == COMPACT_CONTENDED_SCHED
  2873. && !(current->flags & PF_KTHREAD))
  2874. goto nopage;
  2875. }
  2876. /*
  2877. * It can become very expensive to allocate transparent hugepages at
  2878. * fault, so use asynchronous memory compaction for THP unless it is
  2879. * khugepaged trying to collapse.
  2880. */
  2881. if (!is_thp_gfp_mask(gfp_mask) || (current->flags & PF_KTHREAD))
  2882. migration_mode = MIGRATE_SYNC_LIGHT;
  2883. /* Try direct reclaim and then allocating */
  2884. page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
  2885. &did_some_progress);
  2886. if (page)
  2887. goto got_pg;
  2888. /* Do not loop if specifically requested */
  2889. if (gfp_mask & __GFP_NORETRY)
  2890. goto noretry;
  2891. /* Keep reclaiming pages as long as there is reasonable progress */
  2892. pages_reclaimed += did_some_progress;
  2893. if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
  2894. ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
  2895. /* Wait for some write requests to complete then retry */
  2896. wait_iff_congested(ac->preferred_zoneref->zone, BLK_RW_ASYNC, HZ/50);
  2897. goto retry;
  2898. }
  2899. /* Reclaim has failed us, start killing things */
  2900. page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
  2901. if (page)
  2902. goto got_pg;
  2903. /* Retry as long as the OOM killer is making progress */
  2904. if (did_some_progress)
  2905. goto retry;
  2906. noretry:
/*
 * High-order allocations do not necessarily loop after direct reclaim,
 * and reclaim/compaction depends on compaction being called after
 * reclaim, so call compaction directly here if necessary.
 */
  2912. page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
  2913. ac, migration_mode,
  2914. &contended_compaction,
  2915. &deferred_compaction);
  2916. if (page)
  2917. goto got_pg;
  2918. nopage:
  2919. warn_alloc_failed(gfp_mask, order, NULL);
  2920. got_pg:
  2921. return page;
  2922. }
  2923. /*
  2924. * This is the 'heart' of the zoned buddy allocator.
  2925. */
  2926. struct page *
  2927. __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
  2928. struct zonelist *zonelist, nodemask_t *nodemask)
  2929. {
  2930. struct page *page;
  2931. unsigned int cpuset_mems_cookie;
  2932. unsigned int alloc_flags = ALLOC_WMARK_LOW|ALLOC_FAIR;
  2933. gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
  2934. struct alloc_context ac = {
  2935. .high_zoneidx = gfp_zone(gfp_mask),
  2936. .zonelist = zonelist,
  2937. .nodemask = nodemask,
  2938. .migratetype = gfpflags_to_migratetype(gfp_mask),
  2939. };
  2940. if (cpusets_enabled()) {
  2941. alloc_mask |= __GFP_HARDWALL;
  2942. alloc_flags |= ALLOC_CPUSET;
  2943. if (!ac.nodemask)
  2944. ac.nodemask = &cpuset_current_mems_allowed;
  2945. }
  2946. gfp_mask &= gfp_allowed_mask;
  2947. lockdep_trace_alloc(gfp_mask);
  2948. might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
  2949. if (should_fail_alloc_page(gfp_mask, order))
  2950. return NULL;
/*
 * Check that the zones suitable for the gfp_mask contain at least one
 * valid zone. It's possible to have an empty zonelist as a result
 * of __GFP_THISNODE and a memoryless node.
 */
  2956. if (unlikely(!zonelist->_zonerefs->zone))
  2957. return NULL;
  2958. if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
  2959. alloc_flags |= ALLOC_CMA;
  2960. retry_cpuset:
  2961. cpuset_mems_cookie = read_mems_allowed_begin();
  2962. /* Dirty zone balancing only done in the fast path */
  2963. ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
  2964. /* The preferred zone is used for statistics later */
  2965. ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
  2966. ac.high_zoneidx, ac.nodemask);
  2967. if (!ac.preferred_zoneref) {
  2968. page = NULL;
  2969. goto no_zone;
  2970. }
  2971. /* First allocation attempt */
  2972. page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
  2973. if (likely(page))
  2974. goto out;
  2975. /*
  2976. * Runtime PM, block IO and its error handling path can deadlock
  2977. * because I/O on the device might not complete.
  2978. */
  2979. alloc_mask = memalloc_noio_flags(gfp_mask);
  2980. ac.spread_dirty_pages = false;
  2981. page = __alloc_pages_slowpath(alloc_mask, order, &ac);
  2982. no_zone:
  2983. /*
  2984. * When updating a task's mems_allowed, it is possible to race with
  2985. * parallel threads in such a way that an allocation can fail while
  2986. * the mask is being updated. If a page allocation is about to fail,
  2987. * check if the cpuset changed during allocation and if so, retry.
  2988. */
  2989. if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
  2990. alloc_mask = gfp_mask;
  2991. goto retry_cpuset;
  2992. }
  2993. out:
  2994. if (kmemcheck_enabled && page)
  2995. kmemcheck_pagealloc_alloc(page, order, gfp_mask);
  2996. trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
  2997. return page;
  2998. }
  2999. EXPORT_SYMBOL(__alloc_pages_nodemask);
  3000. /*
  3001. * Common helper functions.
  3002. */
  3003. unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
  3004. {
  3005. struct page *page;
  3006. /*
  3007. * __get_free_pages() returns a 32-bit address, which cannot represent
  3008. * a highmem page
  3009. */
  3010. VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
  3011. page = alloc_pages(gfp_mask, order);
  3012. if (!page)
  3013. return 0;
  3014. return (unsigned long) page_address(page);
  3015. }
  3016. EXPORT_SYMBOL(__get_free_pages);
  3017. unsigned long get_zeroed_page(gfp_t gfp_mask)
  3018. {
  3019. return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
  3020. }
  3021. EXPORT_SYMBOL(get_zeroed_page);
  3022. void __free_pages(struct page *page, unsigned int order)
  3023. {
  3024. if (put_page_testzero(page)) {
  3025. if (order == 0)
  3026. free_hot_cold_page(page, false);
  3027. else
  3028. __free_pages_ok(page, order);
  3029. }
  3030. }
  3031. EXPORT_SYMBOL(__free_pages);
  3032. void free_pages(unsigned long addr, unsigned int order)
  3033. {
  3034. if (addr != 0) {
  3035. VM_BUG_ON(!virt_addr_valid((void *)addr));
  3036. __free_pages(virt_to_page((void *)addr), order);
  3037. }
  3038. }
  3039. EXPORT_SYMBOL(free_pages);
  3040. /*
  3041. * Page Fragment:
  3042. * An arbitrary-length arbitrary-offset area of memory which resides
  3043. * within a 0 or higher order page. Multiple fragments within that page
  3044. * are individually refcounted, in the page's reference counter.
  3045. *
  3046. * The page_frag functions below provide a simple allocation framework for
  3047. * page fragments. This is used by the network stack and network device
  3048. * drivers to provide a backing region of memory for use as either an
  3049. * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
  3050. */
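/*
 * Editorial usage sketch (hypothetical consumer, not part of the original
 * file): a typical user keeps a per-CPU struct page_frag_cache and carves
 * small fragments out of it, e.g.:
 *
 *	static DEFINE_PER_CPU(struct page_frag_cache, my_frag_cache);
 *
 *	void *buf = __alloc_page_frag(this_cpu_ptr(&my_frag_cache),
 *				      256, GFP_ATOMIC);
 *	if (buf) {
 *		... use the 256-byte fragment ...
 *		__free_page_frag(buf);
 *	}
 *
 * Each fragment holds a reference on the backing page; the page itself is
 * only returned to the allocator once the last fragment has been freed.
 */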
  3051. static struct page *__page_frag_refill(struct page_frag_cache *nc,
  3052. gfp_t gfp_mask)
  3053. {
  3054. struct page *page = NULL;
  3055. gfp_t gfp = gfp_mask;
  3056. #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
  3057. gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
  3058. __GFP_NOMEMALLOC;
  3059. page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
  3060. PAGE_FRAG_CACHE_MAX_ORDER);
  3061. nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
  3062. #endif
  3063. if (unlikely(!page))
  3064. page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
  3065. nc->va = page ? page_address(page) : NULL;
  3066. return page;
  3067. }
  3068. void *__alloc_page_frag(struct page_frag_cache *nc,
  3069. unsigned int fragsz, gfp_t gfp_mask)
  3070. {
  3071. unsigned int size = PAGE_SIZE;
  3072. struct page *page;
  3073. int offset;
  3074. if (unlikely(!nc->va)) {
  3075. refill:
  3076. page = __page_frag_refill(nc, gfp_mask);
  3077. if (!page)
  3078. return NULL;
  3079. #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
  3080. /* if size can vary use size else just use PAGE_SIZE */
  3081. size = nc->size;
  3082. #endif
  3083. /* Even if we own the page, we do not use atomic_set().
  3084. * This would break get_page_unless_zero() users.
  3085. */
  3086. page_ref_add(page, size - 1);
  3087. /* reset page count bias and offset to start of new frag */
  3088. nc->pfmemalloc = page_is_pfmemalloc(page);
  3089. nc->pagecnt_bias = size;
  3090. nc->offset = size;
  3091. }
  3092. offset = nc->offset - fragsz;
  3093. if (unlikely(offset < 0)) {
  3094. page = virt_to_page(nc->va);
  3095. if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
  3096. goto refill;
  3097. #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
  3098. /* if size can vary use size else just use PAGE_SIZE */
  3099. size = nc->size;
  3100. #endif
  3101. /* OK, page count is 0, we can safely set it */
  3102. set_page_count(page, size);
  3103. /* reset page count bias and offset to start of new frag */
  3104. nc->pagecnt_bias = size;
  3105. offset = size - fragsz;
  3106. }
  3107. nc->pagecnt_bias--;
  3108. nc->offset = offset;
  3109. return nc->va + offset;
  3110. }
  3111. EXPORT_SYMBOL(__alloc_page_frag);
  3112. /*
  3113. * Frees a page fragment allocated out of either a compound or order 0 page.
  3114. */
  3115. void __free_page_frag(void *addr)
  3116. {
  3117. struct page *page = virt_to_head_page(addr);
  3118. if (unlikely(put_page_testzero(page)))
  3119. __free_pages_ok(page, compound_order(page));
  3120. }
  3121. EXPORT_SYMBOL(__free_page_frag);
/*
 * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
 * of the current memory cgroup if __GFP_ACCOUNT is set; other than that it is
 * equivalent to alloc_pages.
 *
 * It should be used when the caller would like to use kmalloc, but since the
 * allocation is large, it has to fall back to the page allocator.
 */
  3130. struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
  3131. {
  3132. struct page *page;
  3133. page = alloc_pages(gfp_mask, order);
  3134. if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
  3135. __free_pages(page, order);
  3136. page = NULL;
  3137. }
  3138. return page;
  3139. }
  3140. struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
  3141. {
  3142. struct page *page;
  3143. page = alloc_pages_node(nid, gfp_mask, order);
  3144. if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
  3145. __free_pages(page, order);
  3146. page = NULL;
  3147. }
  3148. return page;
  3149. }
  3150. /*
  3151. * __free_kmem_pages and free_kmem_pages will free pages allocated with
  3152. * alloc_kmem_pages.
  3153. */
  3154. void __free_kmem_pages(struct page *page, unsigned int order)
  3155. {
  3156. memcg_kmem_uncharge(page, order);
  3157. __free_pages(page, order);
  3158. }
  3159. void free_kmem_pages(unsigned long addr, unsigned int order)
  3160. {
  3161. if (addr != 0) {
  3162. VM_BUG_ON(!virt_addr_valid((void *)addr));
  3163. __free_kmem_pages(virt_to_page((void *)addr), order);
  3164. }
  3165. }
  3166. static void *make_alloc_exact(unsigned long addr, unsigned int order,
  3167. size_t size)
  3168. {
  3169. if (addr) {
  3170. unsigned long alloc_end = addr + (PAGE_SIZE << order);
  3171. unsigned long used = addr + PAGE_ALIGN(size);
  3172. split_page(virt_to_page((void *)addr), order);
  3173. while (used < alloc_end) {
  3174. free_page(used);
  3175. used += PAGE_SIZE;
  3176. }
  3177. }
  3178. return (void *)addr;
  3179. }
/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request. alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
  3193. void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
  3194. {
  3195. unsigned int order = get_order(size);
  3196. unsigned long addr;
  3197. addr = __get_free_pages(gfp_mask, order);
  3198. return make_alloc_exact(addr, order, size);
  3199. }
  3200. EXPORT_SYMBOL(alloc_pages_exact);
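/*
 * Editorial usage example (illustrative only): a caller needing a 10 KB
 * physically contiguous buffer could use
 *
 *	void *buf = alloc_pages_exact(10 * 1024, GFP_KERNEL);
 *	...
 *	free_pages_exact(buf, 10 * 1024);
 *
 * A plain alloc_pages(GFP_KERNEL, get_order(10 * 1024)) would round the
 * request up to an order-2 block (16 KB with 4 KB pages); alloc_pages_exact()
 * splits that block and immediately frees the unused tail pages.
 */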
  3201. /**
  3202. * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
  3203. * pages on a node.
  3204. * @nid: the preferred node ID where memory should be allocated
  3205. * @size: the number of bytes to allocate
  3206. * @gfp_mask: GFP flags for the allocation
  3207. *
  3208. * Like alloc_pages_exact(), but try to allocate on node nid first before falling
  3209. * back.
  3210. */
  3211. void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
  3212. {
  3213. unsigned int order = get_order(size);
  3214. struct page *p = alloc_pages_node(nid, gfp_mask, order);
  3215. if (!p)
  3216. return NULL;
  3217. return make_alloc_exact((unsigned long)page_address(p), order, size);
  3218. }
  3219. /**
  3220. * free_pages_exact - release memory allocated via alloc_pages_exact()
  3221. * @virt: the value returned by alloc_pages_exact.
  3222. * @size: size of allocation, same value as passed to alloc_pages_exact().
  3223. *
  3224. * Release the memory allocated by a previous call to alloc_pages_exact.
  3225. */
  3226. void free_pages_exact(void *virt, size_t size)
  3227. {
  3228. unsigned long addr = (unsigned long)virt;
  3229. unsigned long end = addr + PAGE_ALIGN(size);
  3230. while (addr < end) {
  3231. free_page(addr);
  3232. addr += PAGE_SIZE;
  3233. }
  3234. }
  3235. EXPORT_SYMBOL(free_pages_exact);
/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * nr_free_zone_pages() counts the number of pages which are beyond the
 * high watermark within all zones at or below a given zone index. For each
 * zone, the number of pages is calculated as:
 *
 *     managed_pages - high_pages
 */
  3245. static unsigned long nr_free_zone_pages(int offset)
  3246. {
  3247. struct zoneref *z;
  3248. struct zone *zone;
  3249. /* Just pick one node, since fallback list is circular */
  3250. unsigned long sum = 0;
  3251. struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
  3252. for_each_zone_zonelist(zone, z, zonelist, offset) {
  3253. unsigned long size = zone->managed_pages;
  3254. unsigned long high = high_wmark_pages(zone);
  3255. if (size > high)
  3256. sum += size - high;
  3257. }
  3258. return sum;
  3259. }
  3260. /**
  3261. * nr_free_buffer_pages - count number of pages beyond high watermark
  3262. *
  3263. * nr_free_buffer_pages() counts the number of pages which are beyond the high
  3264. * watermark within ZONE_DMA and ZONE_NORMAL.
  3265. */
  3266. unsigned long nr_free_buffer_pages(void)
  3267. {
  3268. return nr_free_zone_pages(gfp_zone(GFP_USER));
  3269. }
  3270. EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
  3271. /**
  3272. * nr_free_pagecache_pages - count number of pages beyond high watermark
  3273. *
  3274. * nr_free_pagecache_pages() counts the number of pages which are beyond the
  3275. * high watermark within all zones.
  3276. */
  3277. unsigned long nr_free_pagecache_pages(void)
  3278. {
  3279. return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
  3280. }
  3281. static inline void show_node(struct zone *zone)
  3282. {
  3283. if (IS_ENABLED(CONFIG_NUMA))
  3284. printk("Node %d ", zone_to_nid(zone));
  3285. }
  3286. long si_mem_available(void)
  3287. {
  3288. long available;
  3289. unsigned long pagecache;
  3290. unsigned long wmark_low = 0;
  3291. unsigned long pages[NR_LRU_LISTS];
  3292. struct zone *zone;
  3293. int lru;
  3294. for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
  3295. pages[lru] = global_page_state(NR_LRU_BASE + lru);
  3296. for_each_zone(zone)
  3297. wmark_low += zone->watermark[WMARK_LOW];
  3298. /*
  3299. * Estimate the amount of memory available for userspace allocations,
  3300. * without causing swapping.
  3301. */
  3302. available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
  3303. /*
  3304. * Not all the page cache can be freed, otherwise the system will
  3305. * start swapping. Assume at least half of the page cache, or the
  3306. * low watermark worth of cache, needs to stay.
  3307. */
  3308. pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
  3309. pagecache -= min(pagecache / 2, wmark_low);
  3310. available += pagecache;
  3311. /*
  3312. * Part of the reclaimable slab consists of items that are in use,
  3313. * and cannot be freed. Cap this estimate at the low watermark.
  3314. */
  3315. available += global_page_state(NR_SLAB_RECLAIMABLE) -
  3316. min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
  3317. if (available < 0)
  3318. available = 0;
  3319. return available;
  3320. }
  3321. EXPORT_SYMBOL_GPL(si_mem_available);
  3322. void si_meminfo(struct sysinfo *val)
  3323. {
  3324. val->totalram = totalram_pages;
  3325. val->sharedram = global_page_state(NR_SHMEM);
  3326. val->freeram = global_page_state(NR_FREE_PAGES);
  3327. val->bufferram = nr_blockdev_pages();
  3328. val->totalhigh = totalhigh_pages;
  3329. val->freehigh = nr_free_highpages();
  3330. val->mem_unit = PAGE_SIZE;
  3331. }
  3332. EXPORT_SYMBOL(si_meminfo);
  3333. #ifdef CONFIG_NUMA
  3334. void si_meminfo_node(struct sysinfo *val, int nid)
  3335. {
  3336. int zone_type; /* needs to be signed */
  3337. unsigned long managed_pages = 0;
  3338. unsigned long managed_highpages = 0;
  3339. unsigned long free_highpages = 0;
  3340. pg_data_t *pgdat = NODE_DATA(nid);
  3341. for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
  3342. managed_pages += pgdat->node_zones[zone_type].managed_pages;
  3343. val->totalram = managed_pages;
  3344. val->sharedram = node_page_state(nid, NR_SHMEM);
  3345. val->freeram = node_page_state(nid, NR_FREE_PAGES);
  3346. #ifdef CONFIG_HIGHMEM
  3347. for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
  3348. struct zone *zone = &pgdat->node_zones[zone_type];
  3349. if (is_highmem(zone)) {
  3350. managed_highpages += zone->managed_pages;
  3351. free_highpages += zone_page_state(zone, NR_FREE_PAGES);
  3352. }
  3353. }
  3354. val->totalhigh = managed_highpages;
  3355. val->freehigh = free_highpages;
  3356. #else
  3357. val->totalhigh = managed_highpages;
  3358. val->freehigh = free_highpages;
  3359. #endif
  3360. val->mem_unit = PAGE_SIZE;
  3361. }
  3362. #endif
  3363. /*
  3364. * Determine whether the node should be displayed or not, depending on whether
  3365. * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
  3366. */
  3367. bool skip_free_areas_node(unsigned int flags, int nid)
  3368. {
  3369. bool ret = false;
  3370. unsigned int cpuset_mems_cookie;
  3371. if (!(flags & SHOW_MEM_FILTER_NODES))
  3372. goto out;
  3373. do {
  3374. cpuset_mems_cookie = read_mems_allowed_begin();
  3375. ret = !node_isset(nid, cpuset_current_mems_allowed);
  3376. } while (read_mems_allowed_retry(cpuset_mems_cookie));
  3377. out:
  3378. return ret;
  3379. }
  3380. #define K(x) ((x) << (PAGE_SHIFT-10))
  3381. static void show_migration_types(unsigned char type)
  3382. {
  3383. static const char types[MIGRATE_TYPES] = {
  3384. [MIGRATE_UNMOVABLE] = 'U',
  3385. [MIGRATE_MOVABLE] = 'M',
  3386. [MIGRATE_RECLAIMABLE] = 'E',
  3387. [MIGRATE_HIGHATOMIC] = 'H',
  3388. #ifdef CONFIG_CMA
  3389. [MIGRATE_CMA] = 'C',
  3390. #endif
  3391. #ifdef CONFIG_MEMORY_ISOLATION
  3392. [MIGRATE_ISOLATE] = 'I',
  3393. #endif
  3394. };
  3395. char tmp[MIGRATE_TYPES + 1];
  3396. char *p = tmp;
  3397. int i;
  3398. for (i = 0; i < MIGRATE_TYPES; i++) {
  3399. if (type & (1 << i))
  3400. *p++ = types[i];
  3401. }
  3402. *p = '\0';
  3403. printk("(%s) ", tmp);
  3404. }
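/*
 * Editorial example: if, for a given order, the unmovable, movable and
 * highatomic free lists are all non-empty, the helper above prints
 * "(UMH) " using the single-letter codes from types[].
 */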
  3405. /*
  3406. * Show free area list (used inside shift_scroll-lock stuff)
  3407. * We also calculate the percentage fragmentation. We do this by counting the
  3408. * memory on each free list with the exception of the first item on the list.
  3409. *
  3410. * Bits in @filter:
  3411. * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
  3412. * cpuset.
  3413. */
  3414. void show_free_areas(unsigned int filter)
  3415. {
  3416. unsigned long free_pcp = 0;
  3417. int cpu;
  3418. struct zone *zone;
  3419. for_each_populated_zone(zone) {
  3420. if (skip_free_areas_node(filter, zone_to_nid(zone)))
  3421. continue;
  3422. for_each_online_cpu(cpu)
  3423. free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
  3424. }
  3425. printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
  3426. " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
  3427. " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
  3428. " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
  3429. " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
  3430. " free:%lu free_pcp:%lu free_cma:%lu\n",
  3431. global_page_state(NR_ACTIVE_ANON),
  3432. global_page_state(NR_INACTIVE_ANON),
  3433. global_page_state(NR_ISOLATED_ANON),
  3434. global_page_state(NR_ACTIVE_FILE),
  3435. global_page_state(NR_INACTIVE_FILE),
  3436. global_page_state(NR_ISOLATED_FILE),
  3437. global_page_state(NR_UNEVICTABLE),
  3438. global_page_state(NR_FILE_DIRTY),
  3439. global_page_state(NR_WRITEBACK),
  3440. global_page_state(NR_UNSTABLE_NFS),
  3441. global_page_state(NR_SLAB_RECLAIMABLE),
  3442. global_page_state(NR_SLAB_UNRECLAIMABLE),
  3443. global_page_state(NR_FILE_MAPPED),
  3444. global_page_state(NR_SHMEM),
  3445. global_page_state(NR_PAGETABLE),
  3446. global_page_state(NR_BOUNCE),
  3447. global_page_state(NR_FREE_PAGES),
  3448. free_pcp,
  3449. global_page_state(NR_FREE_CMA_PAGES));
  3450. for_each_populated_zone(zone) {
  3451. int i;
  3452. if (skip_free_areas_node(filter, zone_to_nid(zone)))
  3453. continue;
  3454. free_pcp = 0;
  3455. for_each_online_cpu(cpu)
  3456. free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
  3457. show_node(zone);
  3458. printk("%s"
  3459. " free:%lukB"
  3460. " min:%lukB"
  3461. " low:%lukB"
  3462. " high:%lukB"
  3463. " active_anon:%lukB"
  3464. " inactive_anon:%lukB"
  3465. " active_file:%lukB"
  3466. " inactive_file:%lukB"
  3467. " unevictable:%lukB"
  3468. " isolated(anon):%lukB"
  3469. " isolated(file):%lukB"
  3470. " present:%lukB"
  3471. " managed:%lukB"
  3472. " mlocked:%lukB"
  3473. " dirty:%lukB"
  3474. " writeback:%lukB"
  3475. " mapped:%lukB"
  3476. " shmem:%lukB"
  3477. " slab_reclaimable:%lukB"
  3478. " slab_unreclaimable:%lukB"
  3479. " kernel_stack:%lukB"
  3480. " pagetables:%lukB"
  3481. " unstable:%lukB"
  3482. " bounce:%lukB"
  3483. " free_pcp:%lukB"
  3484. " local_pcp:%ukB"
  3485. " free_cma:%lukB"
  3486. " writeback_tmp:%lukB"
  3487. " pages_scanned:%lu"
  3488. " all_unreclaimable? %s"
  3489. "\n",
  3490. zone->name,
  3491. K(zone_page_state(zone, NR_FREE_PAGES)),
  3492. K(min_wmark_pages(zone)),
  3493. K(low_wmark_pages(zone)),
  3494. K(high_wmark_pages(zone)),
  3495. K(zone_page_state(zone, NR_ACTIVE_ANON)),
  3496. K(zone_page_state(zone, NR_INACTIVE_ANON)),
  3497. K(zone_page_state(zone, NR_ACTIVE_FILE)),
  3498. K(zone_page_state(zone, NR_INACTIVE_FILE)),
  3499. K(zone_page_state(zone, NR_UNEVICTABLE)),
  3500. K(zone_page_state(zone, NR_ISOLATED_ANON)),
  3501. K(zone_page_state(zone, NR_ISOLATED_FILE)),
  3502. K(zone->present_pages),
  3503. K(zone->managed_pages),
  3504. K(zone_page_state(zone, NR_MLOCK)),
  3505. K(zone_page_state(zone, NR_FILE_DIRTY)),
  3506. K(zone_page_state(zone, NR_WRITEBACK)),
  3507. K(zone_page_state(zone, NR_FILE_MAPPED)),
  3508. K(zone_page_state(zone, NR_SHMEM)),
  3509. K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
  3510. K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
  3511. zone_page_state(zone, NR_KERNEL_STACK) *
  3512. THREAD_SIZE / 1024,
  3513. K(zone_page_state(zone, NR_PAGETABLE)),
  3514. K(zone_page_state(zone, NR_UNSTABLE_NFS)),
  3515. K(zone_page_state(zone, NR_BOUNCE)),
  3516. K(free_pcp),
  3517. K(this_cpu_read(zone->pageset->pcp.count)),
  3518. K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
  3519. K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
  3520. K(zone_page_state(zone, NR_PAGES_SCANNED)),
  3521. (!zone_reclaimable(zone) ? "yes" : "no")
  3522. );
  3523. printk("lowmem_reserve[]:");
  3524. for (i = 0; i < MAX_NR_ZONES; i++)
  3525. printk(" %ld", zone->lowmem_reserve[i]);
  3526. printk("\n");
  3527. }
  3528. for_each_populated_zone(zone) {
  3529. unsigned int order;
  3530. unsigned long nr[MAX_ORDER], flags, total = 0;
  3531. unsigned char types[MAX_ORDER];
  3532. if (skip_free_areas_node(filter, zone_to_nid(zone)))
  3533. continue;
  3534. show_node(zone);
  3535. printk("%s: ", zone->name);
  3536. spin_lock_irqsave(&zone->lock, flags);
  3537. for (order = 0; order < MAX_ORDER; order++) {
  3538. struct free_area *area = &zone->free_area[order];
  3539. int type;
  3540. nr[order] = area->nr_free;
  3541. total += nr[order] << order;
  3542. types[order] = 0;
  3543. for (type = 0; type < MIGRATE_TYPES; type++) {
  3544. if (!list_empty(&area->free_list[type]))
  3545. types[order] |= 1 << type;
  3546. }
  3547. }
  3548. spin_unlock_irqrestore(&zone->lock, flags);
  3549. for (order = 0; order < MAX_ORDER; order++) {
  3550. printk("%lu*%lukB ", nr[order], K(1UL) << order);
  3551. if (nr[order])
  3552. show_migration_types(types[order]);
  3553. }
  3554. printk("= %lukB\n", K(total));
  3555. }
  3556. hugetlb_show_meminfo();
  3557. printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
  3558. show_swap_cache_info();
  3559. }
  3560. static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
  3561. {
  3562. zoneref->zone = zone;
  3563. zoneref->zone_idx = zone_idx(zone);
  3564. }
  3565. /*
  3566. * Builds allocation fallback zone lists.
  3567. *
  3568. * Add all populated zones of a node to the zonelist.
  3569. */
  3570. static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
  3571. int nr_zones)
  3572. {
  3573. struct zone *zone;
  3574. enum zone_type zone_type = MAX_NR_ZONES;
  3575. do {
  3576. zone_type--;
  3577. zone = pgdat->node_zones + zone_type;
  3578. if (populated_zone(zone)) {
  3579. zoneref_set_zone(zone,
  3580. &zonelist->_zonerefs[nr_zones++]);
  3581. check_highest_zone(zone_type);
  3582. }
  3583. } while (zone_type);
  3584. return nr_zones;
  3585. }
  3586. /*
  3587. * zonelist_order:
  3588. * 0 = automatic detection of better ordering.
  3589. * 1 = order by ([node] distance, -zonetype)
  3590. * 2 = order by (-zonetype, [node] distance)
  3591. *
  3592. * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
  3593. * the same zonelist. So only NUMA can configure this param.
  3594. */
  3595. #define ZONELIST_ORDER_DEFAULT 0
  3596. #define ZONELIST_ORDER_NODE 1
  3597. #define ZONELIST_ORDER_ZONE 2
  3598. /* zonelist order in the kernel.
  3599. * set_zonelist_order() will set this to NODE or ZONE.
  3600. */
  3601. static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
  3602. static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
  3603. #ifdef CONFIG_NUMA
/* The value the user specified, set via the boot option or sysctl */
  3605. static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
  3606. /* string for sysctl */
  3607. #define NUMA_ZONELIST_ORDER_LEN 16
  3608. char numa_zonelist_order[16] = "default";
/*
 * Interface for configuring zonelist ordering.
 * Command line option "numa_zonelist_order"
 *	"[dD]efault" - default, automatic configuration
 *	"[nN]ode"    - order by node locality, then by zone within node
 *	"[zZ]one"    - order by zone, then by locality within zone
 */
  3616. static int __parse_numa_zonelist_order(char *s)
  3617. {
  3618. if (*s == 'd' || *s == 'D') {
  3619. user_zonelist_order = ZONELIST_ORDER_DEFAULT;
  3620. } else if (*s == 'n' || *s == 'N') {
  3621. user_zonelist_order = ZONELIST_ORDER_NODE;
  3622. } else if (*s == 'z' || *s == 'Z') {
  3623. user_zonelist_order = ZONELIST_ORDER_ZONE;
  3624. } else {
  3625. pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s);
  3626. return -EINVAL;
  3627. }
  3628. return 0;
  3629. }
  3630. static __init int setup_numa_zonelist_order(char *s)
  3631. {
  3632. int ret;
  3633. if (!s)
  3634. return 0;
  3635. ret = __parse_numa_zonelist_order(s);
  3636. if (ret == 0)
  3637. strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
  3638. return ret;
  3639. }
  3640. early_param("numa_zonelist_order", setup_numa_zonelist_order);
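/*
 * Editorial usage note: the ordering can be selected at boot time, e.g.
 * with "numa_zonelist_order=zone" on the kernel command line, or at
 * runtime via the sysctl:
 *
 *	echo node > /proc/sys/vm/numa_zonelist_order
 *
 * Both paths go through __parse_numa_zonelist_order(); the sysctl handler
 * below additionally rebuilds the zonelists when the value changes.
 */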
  3641. /*
  3642. * sysctl handler for numa_zonelist_order
  3643. */
  3644. int numa_zonelist_order_handler(struct ctl_table *table, int write,
  3645. void __user *buffer, size_t *length,
  3646. loff_t *ppos)
  3647. {
  3648. char saved_string[NUMA_ZONELIST_ORDER_LEN];
  3649. int ret;
  3650. static DEFINE_MUTEX(zl_order_mutex);
  3651. mutex_lock(&zl_order_mutex);
  3652. if (write) {
  3653. if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
  3654. ret = -EINVAL;
  3655. goto out;
  3656. }
  3657. strcpy(saved_string, (char *)table->data);
  3658. }
  3659. ret = proc_dostring(table, write, buffer, length, ppos);
  3660. if (ret)
  3661. goto out;
  3662. if (write) {
  3663. int oldval = user_zonelist_order;
  3664. ret = __parse_numa_zonelist_order((char *)table->data);
  3665. if (ret) {
  3666. /*
  3667. * bogus value. restore saved string
  3668. */
  3669. strncpy((char *)table->data, saved_string,
  3670. NUMA_ZONELIST_ORDER_LEN);
  3671. user_zonelist_order = oldval;
  3672. } else if (oldval != user_zonelist_order) {
  3673. mutex_lock(&zonelists_mutex);
  3674. build_all_zonelists(NULL, NULL);
  3675. mutex_unlock(&zonelists_mutex);
  3676. }
  3677. }
  3678. out:
  3679. mutex_unlock(&zl_order_mutex);
  3680. return ret;
  3681. }
  3682. #define MAX_NODE_LOAD (nr_online_nodes)
  3683. static int node_load[MAX_NUMNODES];
  3684. /**
  3685. * find_next_best_node - find the next node that should appear in a given node's fallback list
  3686. * @node: node whose fallback list we're appending
  3687. * @used_node_mask: nodemask_t of already used nodes
  3688. *
  3689. * We use a number of factors to determine which is the next node that should
  3690. * appear on a given node's fallback list. The node should not have appeared
  3691. * already in @node's fallback list, and it should be the next closest node
  3692. * according to the distance array (which contains arbitrary distance values
  3693. * from each node to each node in the system), and should also prefer nodes
  3694. * with no CPUs, since presumably they'll have very little allocation pressure
  3695. * on them otherwise.
  3696. * It returns -1 if no node is found.
  3697. */
  3698. static int find_next_best_node(int node, nodemask_t *used_node_mask)
  3699. {
  3700. int n, val;
  3701. int min_val = INT_MAX;
  3702. int best_node = NUMA_NO_NODE;
  3703. const struct cpumask *tmp = cpumask_of_node(0);
  3704. /* Use the local node if we haven't already */
  3705. if (!node_isset(node, *used_node_mask)) {
  3706. node_set(node, *used_node_mask);
  3707. return node;
  3708. }
  3709. for_each_node_state(n, N_MEMORY) {
  3710. /* Don't want a node to appear more than once */
  3711. if (node_isset(n, *used_node_mask))
  3712. continue;
  3713. /* Use the distance array to find the distance */
  3714. val = node_distance(node, n);
  3715. /* Penalize nodes under us ("prefer the next node") */
  3716. val += (n < node);
  3717. /* Give preference to headless and unused nodes */
  3718. tmp = cpumask_of_node(n);
  3719. if (!cpumask_empty(tmp))
  3720. val += PENALTY_FOR_NODE_WITH_CPUS;
  3721. /* Slight preference for less loaded node */
  3722. val *= (MAX_NODE_LOAD*MAX_NUMNODES);
  3723. val += node_load[n];
  3724. if (val < min_val) {
  3725. min_val = val;
  3726. best_node = n;
  3727. }
  3728. }
  3729. if (best_node >= 0)
  3730. node_set(best_node, *used_node_mask);
  3731. return best_node;
  3732. }
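/*
 * Editorial sketch of the scoring above (illustrative values only): if
 * node 0 must choose between node 1 (distance 20, has CPUs) and node 2
 * (distance 20, memory-only), both start with val = 20, node 1 is then
 * penalised by PENALTY_FOR_NODE_WITH_CPUS, and after the
 * (MAX_NODE_LOAD * MAX_NUMNODES) scaling plus node_load[] the memory-only
 * node 2 ends up with the smaller value and is picked first, so headless
 * nodes are preferred as fallback targets.
 */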
  3733. /*
  3734. * Build zonelists ordered by node and zones within node.
  3735. * This results in maximum locality--normal zone overflows into local
  3736. * DMA zone, if any--but risks exhausting DMA zone.
  3737. */
  3738. static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
  3739. {
  3740. int j;
  3741. struct zonelist *zonelist;
  3742. zonelist = &pgdat->node_zonelists[0];
  3743. for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
  3744. ;
  3745. j = build_zonelists_node(NODE_DATA(node), zonelist, j);
  3746. zonelist->_zonerefs[j].zone = NULL;
  3747. zonelist->_zonerefs[j].zone_idx = 0;
  3748. }
  3749. /*
  3750. * Build gfp_thisnode zonelists
  3751. */
  3752. static void build_thisnode_zonelists(pg_data_t *pgdat)
  3753. {
  3754. int j;
  3755. struct zonelist *zonelist;
  3756. zonelist = &pgdat->node_zonelists[1];
  3757. j = build_zonelists_node(pgdat, zonelist, 0);
  3758. zonelist->_zonerefs[j].zone = NULL;
  3759. zonelist->_zonerefs[j].zone_idx = 0;
  3760. }
  3761. /*
  3762. * Build zonelists ordered by zone and nodes within zones.
  3763. * This results in conserving DMA zone[s] until all Normal memory is
  3764. * exhausted, but results in overflowing to remote node while memory
  3765. * may still exist in local DMA zone.
  3766. */
  3767. static int node_order[MAX_NUMNODES];
  3768. static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
  3769. {
  3770. int pos, j, node;
  3771. int zone_type; /* needs to be signed */
  3772. struct zone *z;
  3773. struct zonelist *zonelist;
  3774. zonelist = &pgdat->node_zonelists[0];
  3775. pos = 0;
  3776. for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
  3777. for (j = 0; j < nr_nodes; j++) {
  3778. node = node_order[j];
  3779. z = &NODE_DATA(node)->node_zones[zone_type];
  3780. if (populated_zone(z)) {
  3781. zoneref_set_zone(z,
  3782. &zonelist->_zonerefs[pos++]);
  3783. check_highest_zone(zone_type);
  3784. }
  3785. }
  3786. }
  3787. zonelist->_zonerefs[pos].zone = NULL;
  3788. zonelist->_zonerefs[pos].zone_idx = 0;
  3789. }
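/*
 * To illustrate the difference between the two orderings on a hypothetical
 * two-node machine where each node has a DMA and a Normal zone: node ordering
 * gives node 0 the fallback list
 *   N0/Normal, N0/DMA, N1/Normal, N1/DMA
 * while zone ordering gives
 *   N0/Normal, N1/Normal, N0/DMA, N1/DMA
 * i.e. zone ordering spills to the remote Normal zone before touching any
 * DMA zone, at the cost of locality.
 */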
  3790. #if defined(CONFIG_64BIT)
  3791. /*
  3792. * Devices that require DMA32/DMA are relatively rare and do not justify a
  3793. * penalty to every machine in case the specialised case applies. Default
  3794. * to Node-ordering on 64-bit NUMA machines
  3795. */
  3796. static int default_zonelist_order(void)
  3797. {
  3798. return ZONELIST_ORDER_NODE;
  3799. }
  3800. #else
  3801. /*
  3802. * On 32-bit, the Normal zone needs to be preserved for allocations accessible
  3803. * by the kernel. If processes running on node 0 deplete the low memory zone
3804. * then reclaim will occur more frequently, increasing stalls and potentially
3805. * making the node easier to OOM if a large percentage of the zone is under writeback or
  3806. * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
  3807. * Hence, default to zone ordering on 32-bit.
  3808. */
  3809. static int default_zonelist_order(void)
  3810. {
  3811. return ZONELIST_ORDER_ZONE;
  3812. }
  3813. #endif /* CONFIG_64BIT */
  3814. static void set_zonelist_order(void)
  3815. {
  3816. if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
  3817. current_zonelist_order = default_zonelist_order();
  3818. else
  3819. current_zonelist_order = user_zonelist_order;
  3820. }
  3821. static void build_zonelists(pg_data_t *pgdat)
  3822. {
  3823. int i, node, load;
  3824. nodemask_t used_mask;
  3825. int local_node, prev_node;
  3826. struct zonelist *zonelist;
  3827. unsigned int order = current_zonelist_order;
  3828. /* initialize zonelists */
  3829. for (i = 0; i < MAX_ZONELISTS; i++) {
  3830. zonelist = pgdat->node_zonelists + i;
  3831. zonelist->_zonerefs[0].zone = NULL;
  3832. zonelist->_zonerefs[0].zone_idx = 0;
  3833. }
  3834. /* NUMA-aware ordering of nodes */
  3835. local_node = pgdat->node_id;
  3836. load = nr_online_nodes;
  3837. prev_node = local_node;
  3838. nodes_clear(used_mask);
  3839. memset(node_order, 0, sizeof(node_order));
  3840. i = 0;
  3841. while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
  3842. /*
  3843. * We don't want to pressure a particular node.
3844. * So add a penalty to the first node in the same
3845. * distance group to make the selection round-robin.
  3846. */
  3847. if (node_distance(local_node, node) !=
  3848. node_distance(local_node, prev_node))
  3849. node_load[node] = load;
  3850. prev_node = node;
  3851. load--;
  3852. if (order == ZONELIST_ORDER_NODE)
  3853. build_zonelists_in_node_order(pgdat, node);
  3854. else
  3855. node_order[i++] = node; /* remember order */
  3856. }
  3857. if (order == ZONELIST_ORDER_ZONE) {
  3858. /* calculate node order -- i.e., DMA last! */
  3859. build_zonelists_in_zone_order(pgdat, i);
  3860. }
  3861. build_thisnode_zonelists(pgdat);
  3862. }
  3863. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  3864. /*
  3865. * Return node id of node used for "local" allocations.
  3866. * I.e., first node id of first zone in arg node's generic zonelist.
  3867. * Used for initializing percpu 'numa_mem', which is used primarily
  3868. * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
  3869. */
  3870. int local_memory_node(int node)
  3871. {
  3872. struct zoneref *z;
  3873. z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
  3874. gfp_zone(GFP_KERNEL),
  3875. NULL);
  3876. return z->zone->node;
  3877. }
  3878. #endif
  3879. #else /* CONFIG_NUMA */
  3880. static void set_zonelist_order(void)
  3881. {
  3882. current_zonelist_order = ZONELIST_ORDER_ZONE;
  3883. }
  3884. static void build_zonelists(pg_data_t *pgdat)
  3885. {
  3886. int node, local_node;
  3887. enum zone_type j;
  3888. struct zonelist *zonelist;
  3889. local_node = pgdat->node_id;
  3890. zonelist = &pgdat->node_zonelists[0];
  3891. j = build_zonelists_node(pgdat, zonelist, 0);
  3892. /*
  3893. * Now we build the zonelist so that it contains the zones
  3894. * of all the other nodes.
  3895. * We don't want to pressure a particular node, so when
  3896. * building the zones for node N, we make sure that the
  3897. * zones coming right after the local ones are those from
  3898. * node N+1 (modulo N)
  3899. */
  3900. for (node = local_node + 1; node < MAX_NUMNODES; node++) {
  3901. if (!node_online(node))
  3902. continue;
  3903. j = build_zonelists_node(NODE_DATA(node), zonelist, j);
  3904. }
  3905. for (node = 0; node < local_node; node++) {
  3906. if (!node_online(node))
  3907. continue;
  3908. j = build_zonelists_node(NODE_DATA(node), zonelist, j);
  3909. }
  3910. zonelist->_zonerefs[j].zone = NULL;
  3911. zonelist->_zonerefs[j].zone_idx = 0;
  3912. }
  3913. #endif /* CONFIG_NUMA */
  3914. /*
  3915. * Boot pageset table. One per cpu which is going to be used for all
  3916. * zones and all nodes. The parameters will be set in such a way
  3917. * that an item put on a list will immediately be handed over to
  3918. * the buddy list. This is safe since pageset manipulation is done
  3919. * with interrupts disabled.
  3920. *
  3921. * The boot_pagesets must be kept even after bootup is complete for
  3922. * unused processors and/or zones. They do play a role for bootstrapping
  3923. * hotplugged processors.
  3924. *
  3925. * zoneinfo_show() and maybe other functions do
  3926. * not check if the processor is online before following the pageset pointer.
  3927. * Other parts of the kernel may not check if the zone is available.
  3928. */
  3929. static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
  3930. static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
  3931. static void setup_zone_pageset(struct zone *zone);
  3932. /*
  3933. * Global mutex to protect against size modification of zonelists
  3934. * as well as to serialize pageset setup for the new populated zone.
  3935. */
  3936. DEFINE_MUTEX(zonelists_mutex);
3937. /* Return value is int just to satisfy stop_machine() */
  3938. static int __build_all_zonelists(void *data)
  3939. {
  3940. int nid;
  3941. int cpu;
  3942. pg_data_t *self = data;
  3943. #ifdef CONFIG_NUMA
  3944. memset(node_load, 0, sizeof(node_load));
  3945. #endif
  3946. if (self && !node_online(self->node_id)) {
  3947. build_zonelists(self);
  3948. }
  3949. for_each_online_node(nid) {
  3950. pg_data_t *pgdat = NODE_DATA(nid);
  3951. build_zonelists(pgdat);
  3952. }
  3953. /*
  3954. * Initialize the boot_pagesets that are going to be used
  3955. * for bootstrapping processors. The real pagesets for
  3956. * each zone will be allocated later when the per cpu
  3957. * allocator is available.
  3958. *
  3959. * boot_pagesets are used also for bootstrapping offline
  3960. * cpus if the system is already booted because the pagesets
  3961. * are needed to initialize allocators on a specific cpu too.
3962. * E.g. the percpu allocator needs the page allocator, which in turn
3963. * needs the percpu allocator in order to allocate its pagesets
3964. * (a chicken-and-egg dilemma).
  3965. */
  3966. for_each_possible_cpu(cpu) {
  3967. setup_pageset(&per_cpu(boot_pageset, cpu), 0);
  3968. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  3969. /*
  3970. * We now know the "local memory node" for each node--
  3971. * i.e., the node of the first zone in the generic zonelist.
  3972. * Set up numa_mem percpu variable for on-line cpus. During
  3973. * boot, only the boot cpu should be on-line; we'll init the
  3974. * secondary cpus' numa_mem as they come on-line. During
  3975. * node/memory hotplug, we'll fixup all on-line cpus.
  3976. */
  3977. if (cpu_online(cpu))
  3978. set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
  3979. #endif
  3980. }
  3981. return 0;
  3982. }
  3983. static noinline void __init
  3984. build_all_zonelists_init(void)
  3985. {
  3986. __build_all_zonelists(NULL);
  3987. mminit_verify_zonelist();
  3988. cpuset_init_current_mems_allowed();
  3989. }
  3990. /*
  3991. * Called with zonelists_mutex held always
  3992. * unless system_state == SYSTEM_BOOTING.
  3993. *
  3994. * __ref due to (1) call of __meminit annotated setup_zone_pageset
  3995. * [we're only called with non-NULL zone through __meminit paths] and
  3996. * (2) call of __init annotated helper build_all_zonelists_init
  3997. * [protected by SYSTEM_BOOTING].
  3998. */
  3999. void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
  4000. {
  4001. set_zonelist_order();
  4002. if (system_state == SYSTEM_BOOTING) {
  4003. build_all_zonelists_init();
  4004. } else {
  4005. #ifdef CONFIG_MEMORY_HOTPLUG
  4006. if (zone)
  4007. setup_zone_pageset(zone);
  4008. #endif
4009. /* We have to stop all CPUs to guarantee there is no user
4010. of the zonelist. */
  4011. stop_machine(__build_all_zonelists, pgdat, NULL);
  4012. /* cpuset refresh routine should be here */
  4013. }
  4014. vm_total_pages = nr_free_pagecache_pages();
  4015. /*
  4016. * Disable grouping by mobility if the number of pages in the
  4017. * system is too low to allow the mechanism to work. It would be
  4018. * more accurate, but expensive to check per-zone. This check is
  4019. * made on memory-hotadd so a system can start with mobility
  4020. * disabled and enable it later
  4021. */
  4022. if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
  4023. page_group_by_mobility_disabled = 1;
  4024. else
  4025. page_group_by_mobility_disabled = 0;
  4026. pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n",
  4027. nr_online_nodes,
  4028. zonelist_order_name[current_zonelist_order],
  4029. page_group_by_mobility_disabled ? "off" : "on",
  4030. vm_total_pages);
  4031. #ifdef CONFIG_NUMA
  4032. pr_info("Policy zone: %s\n", zone_names[policy_zone]);
  4033. #endif
  4034. }
  4035. /*
  4036. * Helper functions to size the waitqueue hash table.
  4037. * Essentially these want to choose hash table sizes sufficiently
  4038. * large so that collisions trying to wait on pages are rare.
  4039. * But in fact, the number of active page waitqueues on typical
  4040. * systems is ridiculously low, less than 200. So this is even
  4041. * conservative, even though it seems large.
  4042. *
  4043. * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
  4044. * waitqueues, i.e. the size of the waitq table given the number of pages.
  4045. */
  4046. #define PAGES_PER_WAITQUEUE 256
  4047. #ifndef CONFIG_MEMORY_HOTPLUG
  4048. static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
  4049. {
  4050. unsigned long size = 1;
  4051. pages /= PAGES_PER_WAITQUEUE;
  4052. while (size < pages)
  4053. size <<= 1;
  4054. /*
  4055. * Once we have dozens or even hundreds of threads sleeping
  4056. * on IO we've got bigger problems than wait queue collision.
  4057. * Limit the size of the wait table to a reasonable size.
  4058. */
  4059. size = min(size, 4096UL);
  4060. return max(size, 4UL);
  4061. }
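/*
 * Worked example (assuming 4KiB pages): a 1GiB zone has 262144 pages;
 * 262144 / PAGES_PER_WAITQUEUE = 1024, which is already a power of two, so
 * the table gets 1024 entries. The clamps only matter for very small zones
 * (minimum 4 entries) or very large ones (maximum 4096 entries).
 */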
  4062. #else
  4063. /*
  4064. * A zone's size might be changed by hot-add, so it is not possible to determine
  4065. * a suitable size for its wait_table. So we use the maximum size now.
  4066. *
4067. * The max wait table size = 4096 x sizeof(wait_queue_head_t), i.e.:
  4068. *
  4069. * i386 (preemption config) : 4096 x 16 = 64Kbyte.
  4070. * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
  4071. * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
  4072. *
4073. * The maximum number of entries is reached when a zone's memory is (512K + 256) pages
4074. * or more under the traditional sizing above. It equals:
  4075. *
  4076. * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
  4077. * ia64(16K page size) : = ( 8G + 4M)byte.
  4078. * powerpc (64K page size) : = (32G +16M)byte.
  4079. */
  4080. static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
  4081. {
  4082. return 4096UL;
  4083. }
  4084. #endif
  4085. /*
  4086. * This is an integer logarithm so that shifts can be used later
  4087. * to extract the more random high bits from the multiplicative
  4088. * hash function before the remainder is taken.
  4089. */
  4090. static inline unsigned long wait_table_bits(unsigned long size)
  4091. {
  4092. return ffz(~size);
  4093. }
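/*
 * Since wait_table_hash_nr_entries() always returns a power of two, ffz(~size)
 * is simply log2(size); e.g. wait_table_bits(4096) == 12 and
 * wait_table_bits(1024) == 10.
 */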
  4094. /*
  4095. * Initially all pages are reserved - free ones are freed
  4096. * up by free_all_bootmem() once the early boot process is
  4097. * done. Non-atomic initialization, single-pass.
  4098. */
  4099. void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
  4100. unsigned long start_pfn, enum memmap_context context)
  4101. {
  4102. struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
  4103. unsigned long end_pfn = start_pfn + size;
  4104. pg_data_t *pgdat = NODE_DATA(nid);
  4105. unsigned long pfn;
  4106. unsigned long nr_initialised = 0;
  4107. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4108. struct memblock_region *r = NULL, *tmp;
  4109. #endif
  4110. if (highest_memmap_pfn < end_pfn - 1)
  4111. highest_memmap_pfn = end_pfn - 1;
  4112. /*
  4113. * Honor reservation requested by the driver for this ZONE_DEVICE
  4114. * memory
  4115. */
  4116. if (altmap && start_pfn == altmap->base_pfn)
  4117. start_pfn += altmap->reserve;
  4118. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  4119. /*
  4120. * There can be holes in boot-time mem_map[]s handed to this
  4121. * function. They do not exist on hotplugged memory.
  4122. */
  4123. if (context != MEMMAP_EARLY)
  4124. goto not_early;
  4125. if (!early_pfn_valid(pfn))
  4126. continue;
  4127. if (!early_pfn_in_nid(pfn, nid))
  4128. continue;
  4129. if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
  4130. break;
  4131. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4132. /*
4133. * Without mirrored_kernelcore, if ZONE_MOVABLE exists the range
4134. * from zone_movable_pfn[nid] to the end of each node should be
4135. * ZONE_MOVABLE, not ZONE_NORMAL. Skip it.
  4136. */
  4137. if (!mirrored_kernelcore && zone_movable_pfn[nid])
  4138. if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
  4139. continue;
  4140. /*
4141. * Check the memblock attributes set by firmware, which can affect
4142. * the kernel memory layout. If zone==ZONE_MOVABLE but the memory is
4143. * mirrored, it's an overlapping memmap init. Skip it.
  4144. */
  4145. if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
  4146. if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
  4147. for_each_memblock(memory, tmp)
  4148. if (pfn < memblock_region_memory_end_pfn(tmp))
  4149. break;
  4150. r = tmp;
  4151. }
  4152. if (pfn >= memblock_region_memory_base_pfn(r) &&
  4153. memblock_is_mirror(r)) {
  4154. /* already initialized as NORMAL */
  4155. pfn = memblock_region_memory_end_pfn(r);
  4156. continue;
  4157. }
  4158. }
  4159. #endif
  4160. not_early:
  4161. /*
  4162. * Mark the block movable so that blocks are reserved for
  4163. * movable at startup. This will force kernel allocations
  4164. * to reserve their blocks rather than leaking throughout
  4165. * the address space during boot when many long-lived
  4166. * kernel allocations are made.
  4167. *
4168. * The pageblock bitmap is created for the zone's valid pfn range, but the
4169. * memmap can be created for invalid pages (for alignment), so
4170. * check here that we do not call set_pageblock_migratetype() against a
4171. * pfn outside the zone.
  4172. */
  4173. if (!(pfn & (pageblock_nr_pages - 1))) {
  4174. struct page *page = pfn_to_page(pfn);
  4175. __init_single_page(page, pfn, zone, nid);
  4176. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  4177. } else {
  4178. __init_single_pfn(pfn, zone, nid);
  4179. }
  4180. }
  4181. }
  4182. static void __meminit zone_init_free_lists(struct zone *zone)
  4183. {
  4184. unsigned int order, t;
  4185. for_each_migratetype_order(order, t) {
  4186. INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
  4187. zone->free_area[order].nr_free = 0;
  4188. }
  4189. }
  4190. #ifndef __HAVE_ARCH_MEMMAP_INIT
  4191. #define memmap_init(size, nid, zone, start_pfn) \
  4192. memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
  4193. #endif
  4194. static int zone_batchsize(struct zone *zone)
  4195. {
  4196. #ifdef CONFIG_MMU
  4197. int batch;
  4198. /*
4199. * The per-cpu-pages pools are set to around 1/1000th of the
4200. * size of the zone, but no more than half a megabyte.
  4201. *
  4202. * OK, so we don't know how big the cache is. So guess.
  4203. */
  4204. batch = zone->managed_pages / 1024;
  4205. if (batch * PAGE_SIZE > 512 * 1024)
  4206. batch = (512 * 1024) / PAGE_SIZE;
  4207. batch /= 4; /* We effectively *= 4 below */
  4208. if (batch < 1)
  4209. batch = 1;
  4210. /*
  4211. * Clamp the batch to a 2^n - 1 value. Having a power
  4212. * of 2 value was found to be more likely to have
  4213. * suboptimal cache aliasing properties in some cases.
  4214. *
  4215. * For example if 2 tasks are alternately allocating
  4216. * batches of pages, one task can end up with a lot
  4217. * of pages of one half of the possible page colors
  4218. * and the other with pages of the other colors.
  4219. */
  4220. batch = rounddown_pow_of_two(batch + batch/2) - 1;
  4221. return batch;
  4222. #else
  4223. /* The deferral and batching of frees should be suppressed under NOMMU
  4224. * conditions.
  4225. *
  4226. * The problem is that NOMMU needs to be able to allocate large chunks
  4227. * of contiguous memory as there's no hardware page translation to
  4228. * assemble apparent contiguous memory from discontiguous pages.
  4229. *
  4230. * Queueing large contiguous runs of pages for batching, however,
  4231. * causes the pages to actually be freed in smaller chunks. As there
  4232. * can be a significant delay between the individual batches being
  4233. * recycled, this leads to the once large chunks of space being
  4234. * fragmented and becoming unavailable for high-order allocations.
  4235. */
  4236. return 0;
  4237. #endif
  4238. }
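/*
 * Worked example of the MMU case (assuming 4KiB pages): a zone with 262144
 * managed pages (1GiB) gives batch = 262144 / 1024 = 256; 256 pages is 1MiB,
 * which exceeds the 512KiB cap, so batch becomes 128; dividing by 4 leaves 32;
 * rounddown_pow_of_two(32 + 16) - 1 = 31. pageset_set_batch() then sets
 * pcp->high to 6 * 31 = 186 pages for that zone's per-cpu lists.
 */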
  4239. /*
  4240. * pcp->high and pcp->batch values are related and dependent on one another:
4241. * ->batch must never be higher than ->high.
  4242. * The following function updates them in a safe manner without read side
  4243. * locking.
  4244. *
  4245. * Any new users of pcp->batch and pcp->high should ensure they can cope with
4246. * those fields changing asynchronously (according to the above rule).
  4247. *
  4248. * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
  4249. * outside of boot time (or some other assurance that no concurrent updaters
  4250. * exist).
  4251. */
  4252. static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
  4253. unsigned long batch)
  4254. {
  4255. /* start with a fail safe value for batch */
  4256. pcp->batch = 1;
  4257. smp_wmb();
  4258. /* Update high, then batch, in order */
  4259. pcp->high = high;
  4260. smp_wmb();
  4261. pcp->batch = batch;
  4262. }
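/*
 * Why the fail-safe step matters, with an illustrative trace: lowering a
 * pageset from (high = 186, batch = 31) to (high = 24, batch = 6) by writing
 * high and then batch would let a lock-free reader observe high = 24 with the
 * stale batch = 31, violating batch <= high. Writing batch = 1 first means
 * any intermediate view satisfies the invariant.
 */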
  4263. /* a companion to pageset_set_high() */
  4264. static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
  4265. {
  4266. pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
  4267. }
  4268. static void pageset_init(struct per_cpu_pageset *p)
  4269. {
  4270. struct per_cpu_pages *pcp;
  4271. int migratetype;
  4272. memset(p, 0, sizeof(*p));
  4273. pcp = &p->pcp;
  4274. pcp->count = 0;
  4275. for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
  4276. INIT_LIST_HEAD(&pcp->lists[migratetype]);
  4277. }
  4278. static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
  4279. {
  4280. pageset_init(p);
  4281. pageset_set_batch(p, batch);
  4282. }
  4283. /*
  4284. * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
  4285. * to the value high for the pageset p.
  4286. */
  4287. static void pageset_set_high(struct per_cpu_pageset *p,
  4288. unsigned long high)
  4289. {
  4290. unsigned long batch = max(1UL, high / 4);
  4291. if ((high / 4) > (PAGE_SHIFT * 8))
  4292. batch = PAGE_SHIFT * 8;
  4293. pageset_update(&p->pcp, high, batch);
  4294. }
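/*
 * Worked example (assuming PAGE_SHIFT == 12): pageset_set_high(p, 1000)
 * computes batch = 1000 / 4 = 250, which exceeds the PAGE_SHIFT * 8 = 96
 * ceiling, so the pageset ends up with high = 1000 and batch = 96.
 */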
  4295. static void pageset_set_high_and_batch(struct zone *zone,
  4296. struct per_cpu_pageset *pcp)
  4297. {
  4298. if (percpu_pagelist_fraction)
  4299. pageset_set_high(pcp,
  4300. (zone->managed_pages /
  4301. percpu_pagelist_fraction));
  4302. else
  4303. pageset_set_batch(pcp, zone_batchsize(zone));
  4304. }
  4305. static void __meminit zone_pageset_init(struct zone *zone, int cpu)
  4306. {
  4307. struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
  4308. pageset_init(pcp);
  4309. pageset_set_high_and_batch(zone, pcp);
  4310. }
  4311. static void __meminit setup_zone_pageset(struct zone *zone)
  4312. {
  4313. int cpu;
  4314. zone->pageset = alloc_percpu(struct per_cpu_pageset);
  4315. for_each_possible_cpu(cpu)
  4316. zone_pageset_init(zone, cpu);
  4317. }
  4318. /*
  4319. * Allocate per cpu pagesets and initialize them.
  4320. * Before this call only boot pagesets were available.
  4321. */
  4322. void __init setup_per_cpu_pageset(void)
  4323. {
  4324. struct zone *zone;
  4325. for_each_populated_zone(zone)
  4326. setup_zone_pageset(zone);
  4327. }
  4328. static noinline __init_refok
  4329. int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
  4330. {
  4331. int i;
  4332. size_t alloc_size;
  4333. /*
  4334. * The per-page waitqueue mechanism uses hashed waitqueues
  4335. * per zone.
  4336. */
  4337. zone->wait_table_hash_nr_entries =
  4338. wait_table_hash_nr_entries(zone_size_pages);
  4339. zone->wait_table_bits =
  4340. wait_table_bits(zone->wait_table_hash_nr_entries);
  4341. alloc_size = zone->wait_table_hash_nr_entries
  4342. * sizeof(wait_queue_head_t);
  4343. if (!slab_is_available()) {
  4344. zone->wait_table = (wait_queue_head_t *)
  4345. memblock_virt_alloc_node_nopanic(
  4346. alloc_size, zone->zone_pgdat->node_id);
  4347. } else {
  4348. /*
  4349. * This case means that a zone whose size was 0 gets new memory
  4350. * via memory hot-add.
  4351. * But it may be the case that a new node was hot-added. In
  4352. * this case vmalloc() will not be able to use this new node's
  4353. * memory - this wait_table must be initialized to use this new
  4354. * node itself as well.
  4355. * To use this new node's memory, further consideration will be
  4356. * necessary.
  4357. */
  4358. zone->wait_table = vmalloc(alloc_size);
  4359. }
  4360. if (!zone->wait_table)
  4361. return -ENOMEM;
  4362. for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
  4363. init_waitqueue_head(zone->wait_table + i);
  4364. return 0;
  4365. }
  4366. static __meminit void zone_pcp_init(struct zone *zone)
  4367. {
  4368. /*
  4369. * per cpu subsystem is not up at this point. The following code
  4370. * relies on the ability of the linker to provide the
  4371. * offset of a (static) per cpu variable into the per cpu area.
  4372. */
  4373. zone->pageset = &boot_pageset;
  4374. if (populated_zone(zone))
  4375. printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
  4376. zone->name, zone->present_pages,
  4377. zone_batchsize(zone));
  4378. }
  4379. int __meminit init_currently_empty_zone(struct zone *zone,
  4380. unsigned long zone_start_pfn,
  4381. unsigned long size)
  4382. {
  4383. struct pglist_data *pgdat = zone->zone_pgdat;
  4384. int ret;
  4385. ret = zone_wait_table_init(zone, size);
  4386. if (ret)
  4387. return ret;
  4388. pgdat->nr_zones = zone_idx(zone) + 1;
  4389. zone->zone_start_pfn = zone_start_pfn;
  4390. mminit_dprintk(MMINIT_TRACE, "memmap_init",
  4391. "Initialising map node %d zone %lu pfns %lu -> %lu\n",
  4392. pgdat->node_id,
  4393. (unsigned long)zone_idx(zone),
  4394. zone_start_pfn, (zone_start_pfn + size));
  4395. zone_init_free_lists(zone);
  4396. return 0;
  4397. }
  4398. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4399. #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
  4400. /*
  4401. * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
  4402. */
  4403. int __meminit __early_pfn_to_nid(unsigned long pfn,
  4404. struct mminit_pfnnid_cache *state)
  4405. {
  4406. unsigned long start_pfn, end_pfn;
  4407. int nid;
  4408. if (state->last_start <= pfn && pfn < state->last_end)
  4409. return state->last_nid;
  4410. nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
  4411. if (nid != -1) {
  4412. state->last_start = start_pfn;
  4413. state->last_end = end_pfn;
  4414. state->last_nid = nid;
  4415. }
  4416. return nid;
  4417. }
  4418. #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
  4419. /**
  4420. * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
  4421. * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
  4422. * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
  4423. *
  4424. * If an architecture guarantees that all ranges registered contain no holes
4425. * and may be freed, this function may be used instead of calling
  4426. * memblock_free_early_nid() manually.
  4427. */
  4428. void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
  4429. {
  4430. unsigned long start_pfn, end_pfn;
  4431. int i, this_nid;
  4432. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
  4433. start_pfn = min(start_pfn, max_low_pfn);
  4434. end_pfn = min(end_pfn, max_low_pfn);
  4435. if (start_pfn < end_pfn)
  4436. memblock_free_early_nid(PFN_PHYS(start_pfn),
  4437. (end_pfn - start_pfn) << PAGE_SHIFT,
  4438. this_nid);
  4439. }
  4440. }
  4441. /**
  4442. * sparse_memory_present_with_active_regions - Call memory_present for each active range
  4443. * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
  4444. *
  4445. * If an architecture guarantees that all ranges registered contain no holes and may
  4446. * be freed, this function may be used instead of calling memory_present() manually.
  4447. */
  4448. void __init sparse_memory_present_with_active_regions(int nid)
  4449. {
  4450. unsigned long start_pfn, end_pfn;
  4451. int i, this_nid;
  4452. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
  4453. memory_present(this_nid, start_pfn, end_pfn);
  4454. }
  4455. /**
  4456. * get_pfn_range_for_nid - Return the start and end page frames for a node
  4457. * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
  4458. * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
  4459. * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
  4460. *
  4461. * It returns the start and end page frame of a node based on information
  4462. * provided by memblock_set_node(). If called for a node
  4463. * with no available memory, a warning is printed and the start and end
  4464. * PFNs will be 0.
  4465. */
  4466. void __meminit get_pfn_range_for_nid(unsigned int nid,
  4467. unsigned long *start_pfn, unsigned long *end_pfn)
  4468. {
  4469. unsigned long this_start_pfn, this_end_pfn;
  4470. int i;
  4471. *start_pfn = -1UL;
  4472. *end_pfn = 0;
  4473. for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
  4474. *start_pfn = min(*start_pfn, this_start_pfn);
  4475. *end_pfn = max(*end_pfn, this_end_pfn);
  4476. }
  4477. if (*start_pfn == -1UL)
  4478. *start_pfn = 0;
  4479. }
  4480. /*
  4481. * This finds a zone that can be used for ZONE_MOVABLE pages. The
4482. * assumption is made that zones within a node are ordered by monotonically
4483. * increasing memory addresses so that the "highest" populated zone is used
  4484. */
  4485. static void __init find_usable_zone_for_movable(void)
  4486. {
  4487. int zone_index;
  4488. for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
  4489. if (zone_index == ZONE_MOVABLE)
  4490. continue;
  4491. if (arch_zone_highest_possible_pfn[zone_index] >
  4492. arch_zone_lowest_possible_pfn[zone_index])
  4493. break;
  4494. }
  4495. VM_BUG_ON(zone_index == -1);
  4496. movable_zone = zone_index;
  4497. }
  4498. /*
  4499. * The zone ranges provided by the architecture do not include ZONE_MOVABLE
4500. * because it is sized independently of the architecture. Unlike the other zones,
  4501. * the starting point for ZONE_MOVABLE is not fixed. It may be different
  4502. * in each node depending on the size of each node and how evenly kernelcore
  4503. * is distributed. This helper function adjusts the zone ranges
  4504. * provided by the architecture for a given node by using the end of the
  4505. * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4506. * zones within a node are in order of monotonically increasing memory addresses
  4507. */
  4508. static void __meminit adjust_zone_range_for_zone_movable(int nid,
  4509. unsigned long zone_type,
  4510. unsigned long node_start_pfn,
  4511. unsigned long node_end_pfn,
  4512. unsigned long *zone_start_pfn,
  4513. unsigned long *zone_end_pfn)
  4514. {
  4515. /* Only adjust if ZONE_MOVABLE is on this node */
  4516. if (zone_movable_pfn[nid]) {
  4517. /* Size ZONE_MOVABLE */
  4518. if (zone_type == ZONE_MOVABLE) {
  4519. *zone_start_pfn = zone_movable_pfn[nid];
  4520. *zone_end_pfn = min(node_end_pfn,
  4521. arch_zone_highest_possible_pfn[movable_zone]);
  4522. /* Check if this whole range is within ZONE_MOVABLE */
  4523. } else if (*zone_start_pfn >= zone_movable_pfn[nid])
  4524. *zone_start_pfn = *zone_end_pfn;
  4525. }
  4526. }
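/*
 * Illustrative example: a node spanning pfns [1M, 2M) with
 * zone_movable_pfn[nid] = 1.5M. For ZONE_MOVABLE the range becomes
 * [1.5M, min(2M, arch_zone_highest_possible_pfn[movable_zone])); a kernel
 * zone whose start pfn is already at or above 1.5M collapses to empty
 * (start set equal to end).
 */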
  4527. /*
  4528. * Return the number of pages a zone spans in a node, including holes
  4529. * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  4530. */
  4531. static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  4532. unsigned long zone_type,
  4533. unsigned long node_start_pfn,
  4534. unsigned long node_end_pfn,
  4535. unsigned long *zone_start_pfn,
  4536. unsigned long *zone_end_pfn,
  4537. unsigned long *ignored)
  4538. {
4539. /* When hot-adding a new node from cpu_up(), the node should be empty */
  4540. if (!node_start_pfn && !node_end_pfn)
  4541. return 0;
  4542. /* Get the start and end of the zone */
  4543. *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
  4544. *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
  4545. adjust_zone_range_for_zone_movable(nid, zone_type,
  4546. node_start_pfn, node_end_pfn,
  4547. zone_start_pfn, zone_end_pfn);
  4548. /* Check that this node has pages within the zone's required range */
  4549. if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
  4550. return 0;
  4551. /* Move the zone boundaries inside the node if necessary */
  4552. *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
  4553. *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
  4554. /* Return the spanned pages */
  4555. return *zone_end_pfn - *zone_start_pfn;
  4556. }
  4557. /*
  4558. * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  4559. * then all holes in the requested range will be accounted for.
  4560. */
  4561. unsigned long __meminit __absent_pages_in_range(int nid,
  4562. unsigned long range_start_pfn,
  4563. unsigned long range_end_pfn)
  4564. {
  4565. unsigned long nr_absent = range_end_pfn - range_start_pfn;
  4566. unsigned long start_pfn, end_pfn;
  4567. int i;
  4568. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
  4569. start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
  4570. end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
  4571. nr_absent -= end_pfn - start_pfn;
  4572. }
  4573. return nr_absent;
  4574. }
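/*
 * Worked example: for the range [0, 1000) with registered memory at
 * [0, 300) and [500, 1000), nr_absent starts at 1000 and the two clamped
 * intersections subtract 300 and 500 pages, leaving a 200-page hole.
 */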
  4575. /**
  4576. * absent_pages_in_range - Return number of page frames in holes within a range
  4577. * @start_pfn: The start PFN to start searching for holes
  4578. * @end_pfn: The end PFN to stop searching for holes
  4579. *
4580. * It returns the number of page frames in memory holes within a range.
  4581. */
  4582. unsigned long __init absent_pages_in_range(unsigned long start_pfn,
  4583. unsigned long end_pfn)
  4584. {
  4585. return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
  4586. }
  4587. /* Return the number of page frames in holes in a zone on a node */
  4588. static unsigned long __meminit zone_absent_pages_in_node(int nid,
  4589. unsigned long zone_type,
  4590. unsigned long node_start_pfn,
  4591. unsigned long node_end_pfn,
  4592. unsigned long *ignored)
  4593. {
  4594. unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
  4595. unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
  4596. unsigned long zone_start_pfn, zone_end_pfn;
  4597. unsigned long nr_absent;
4598. /* When hot-adding a new node from cpu_up(), the node should be empty */
  4599. if (!node_start_pfn && !node_end_pfn)
  4600. return 0;
  4601. zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
  4602. zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
  4603. adjust_zone_range_for_zone_movable(nid, zone_type,
  4604. node_start_pfn, node_end_pfn,
  4605. &zone_start_pfn, &zone_end_pfn);
  4606. nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
  4607. /*
  4608. * ZONE_MOVABLE handling.
  4609. * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
  4610. * and vice versa.
  4611. */
  4612. if (zone_movable_pfn[nid]) {
  4613. if (mirrored_kernelcore) {
  4614. unsigned long start_pfn, end_pfn;
  4615. struct memblock_region *r;
  4616. for_each_memblock(memory, r) {
  4617. start_pfn = clamp(memblock_region_memory_base_pfn(r),
  4618. zone_start_pfn, zone_end_pfn);
  4619. end_pfn = clamp(memblock_region_memory_end_pfn(r),
  4620. zone_start_pfn, zone_end_pfn);
  4621. if (zone_type == ZONE_MOVABLE &&
  4622. memblock_is_mirror(r))
  4623. nr_absent += end_pfn - start_pfn;
  4624. if (zone_type == ZONE_NORMAL &&
  4625. !memblock_is_mirror(r))
  4626. nr_absent += end_pfn - start_pfn;
  4627. }
  4628. } else {
  4629. if (zone_type == ZONE_NORMAL)
  4630. nr_absent += node_end_pfn - zone_movable_pfn[nid];
  4631. }
  4632. }
  4633. return nr_absent;
  4634. }
  4635. #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  4636. static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
  4637. unsigned long zone_type,
  4638. unsigned long node_start_pfn,
  4639. unsigned long node_end_pfn,
  4640. unsigned long *zone_start_pfn,
  4641. unsigned long *zone_end_pfn,
  4642. unsigned long *zones_size)
  4643. {
  4644. unsigned int zone;
  4645. *zone_start_pfn = node_start_pfn;
  4646. for (zone = 0; zone < zone_type; zone++)
  4647. *zone_start_pfn += zones_size[zone];
  4648. *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
  4649. return zones_size[zone_type];
  4650. }
  4651. static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
  4652. unsigned long zone_type,
  4653. unsigned long node_start_pfn,
  4654. unsigned long node_end_pfn,
  4655. unsigned long *zholes_size)
  4656. {
  4657. if (!zholes_size)
  4658. return 0;
  4659. return zholes_size[zone_type];
  4660. }
  4661. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  4662. static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
  4663. unsigned long node_start_pfn,
  4664. unsigned long node_end_pfn,
  4665. unsigned long *zones_size,
  4666. unsigned long *zholes_size)
  4667. {
  4668. unsigned long realtotalpages = 0, totalpages = 0;
  4669. enum zone_type i;
  4670. for (i = 0; i < MAX_NR_ZONES; i++) {
  4671. struct zone *zone = pgdat->node_zones + i;
  4672. unsigned long zone_start_pfn, zone_end_pfn;
  4673. unsigned long size, real_size;
  4674. size = zone_spanned_pages_in_node(pgdat->node_id, i,
  4675. node_start_pfn,
  4676. node_end_pfn,
  4677. &zone_start_pfn,
  4678. &zone_end_pfn,
  4679. zones_size);
  4680. real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
  4681. node_start_pfn, node_end_pfn,
  4682. zholes_size);
  4683. if (size)
  4684. zone->zone_start_pfn = zone_start_pfn;
  4685. else
  4686. zone->zone_start_pfn = 0;
  4687. zone->spanned_pages = size;
  4688. zone->present_pages = real_size;
  4689. totalpages += size;
  4690. realtotalpages += real_size;
  4691. }
  4692. pgdat->node_spanned_pages = totalpages;
  4693. pgdat->node_present_pages = realtotalpages;
  4694. printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
  4695. realtotalpages);
  4696. }
  4697. #ifndef CONFIG_SPARSEMEM
  4698. /*
4699. * Calculate the size of the zone->blockflags rounded to an unsigned long.
4700. * Start by making sure zonesize is a multiple of pageblock_nr_pages by
4701. * rounding up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock,
4702. * finally round what is now in bits to the nearest long in bits, then return
4703. * it in bytes.
  4704. */
  4705. static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
  4706. {
  4707. unsigned long usemapsize;
  4708. zonesize += zone_start_pfn & (pageblock_nr_pages-1);
  4709. usemapsize = roundup(zonesize, pageblock_nr_pages);
  4710. usemapsize = usemapsize >> pageblock_order;
  4711. usemapsize *= NR_PAGEBLOCK_BITS;
  4712. usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
  4713. return usemapsize / 8;
  4714. }
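/*
 * Worked example (assuming pageblock_order == 9, i.e. 512-page pageblocks,
 * NR_PAGEBLOCK_BITS == 4 and 64-bit longs): a pageblock-aligned zone of
 * 1048576 pages covers 2048 pageblocks, needing 8192 bits; that is already a
 * multiple of 64, so usemap_size() returns 8192 / 8 = 1024 bytes.
 */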
  4715. static void __init setup_usemap(struct pglist_data *pgdat,
  4716. struct zone *zone,
  4717. unsigned long zone_start_pfn,
  4718. unsigned long zonesize)
  4719. {
  4720. unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
  4721. zone->pageblock_flags = NULL;
  4722. if (usemapsize)
  4723. zone->pageblock_flags =
  4724. memblock_virt_alloc_node_nopanic(usemapsize,
  4725. pgdat->node_id);
  4726. }
  4727. #else
  4728. static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
  4729. unsigned long zone_start_pfn, unsigned long zonesize) {}
  4730. #endif /* CONFIG_SPARSEMEM */
  4731. #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
  4732. /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
  4733. void __paginginit set_pageblock_order(void)
  4734. {
  4735. unsigned int order;
  4736. /* Check that pageblock_nr_pages has not already been setup */
  4737. if (pageblock_order)
  4738. return;
  4739. if (HPAGE_SHIFT > PAGE_SHIFT)
  4740. order = HUGETLB_PAGE_ORDER;
  4741. else
  4742. order = MAX_ORDER - 1;
  4743. /*
  4744. * Assume the largest contiguous order of interest is a huge page.
  4745. * This value may be variable depending on boot parameters on IA64 and
  4746. * powerpc.
  4747. */
  4748. pageblock_order = order;
  4749. }
  4750. #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
  4751. /*
  4752. * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
  4753. * is unused as pageblock_order is set at compile-time. See
  4754. * include/linux/pageblock-flags.h for the values of pageblock_order based on
  4755. * the kernel config
  4756. */
  4757. void __paginginit set_pageblock_order(void)
  4758. {
  4759. }
  4760. #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
  4761. static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
  4762. unsigned long present_pages)
  4763. {
  4764. unsigned long pages = spanned_pages;
  4765. /*
  4766. * Provide a more accurate estimation if there are holes within
  4767. * the zone and SPARSEMEM is in use. If there are holes within the
  4768. * zone, each populated memory region may cost us one or two extra
4769. * memmap pages due to alignment, because the memmap pages for each
4770. * populated region may not be naturally aligned on a page boundary.
4771. * So the (present_pages >> 4) heuristic is a tradeoff for that.
  4772. */
  4773. if (spanned_pages > present_pages + (present_pages >> 4) &&
  4774. IS_ENABLED(CONFIG_SPARSEMEM))
  4775. pages = present_pages;
  4776. return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
  4777. }
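/*
 * Worked example (assuming sizeof(struct page) == 64 and 4KiB pages): a zone
 * spanning 262144 pages needs 262144 * 64 bytes = 16MiB of memmap, i.e.
 * 4096 pages. With SPARSEMEM and a very holey zone (spanned much larger than
 * present), the estimate is based on present_pages instead.
 */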
  4778. /*
  4779. * Set up the zone data structures:
  4780. * - mark all pages reserved
  4781. * - mark all memory queues empty
  4782. * - clear the memory bitmaps
  4783. *
  4784. * NOTE: pgdat should get zeroed by caller.
  4785. */
  4786. static void __paginginit free_area_init_core(struct pglist_data *pgdat)
  4787. {
  4788. enum zone_type j;
  4789. int nid = pgdat->node_id;
  4790. int ret;
  4791. pgdat_resize_init(pgdat);
  4792. #ifdef CONFIG_NUMA_BALANCING
  4793. spin_lock_init(&pgdat->numabalancing_migrate_lock);
  4794. pgdat->numabalancing_migrate_nr_pages = 0;
  4795. pgdat->numabalancing_migrate_next_window = jiffies;
  4796. #endif
  4797. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  4798. spin_lock_init(&pgdat->split_queue_lock);
  4799. INIT_LIST_HEAD(&pgdat->split_queue);
  4800. pgdat->split_queue_len = 0;
  4801. #endif
  4802. init_waitqueue_head(&pgdat->kswapd_wait);
  4803. init_waitqueue_head(&pgdat->pfmemalloc_wait);
  4804. #ifdef CONFIG_COMPACTION
  4805. init_waitqueue_head(&pgdat->kcompactd_wait);
  4806. #endif
  4807. pgdat_page_ext_init(pgdat);
  4808. for (j = 0; j < MAX_NR_ZONES; j++) {
  4809. struct zone *zone = pgdat->node_zones + j;
  4810. unsigned long size, realsize, freesize, memmap_pages;
  4811. unsigned long zone_start_pfn = zone->zone_start_pfn;
  4812. size = zone->spanned_pages;
  4813. realsize = freesize = zone->present_pages;
  4814. /*
  4815. * Adjust freesize so that it accounts for how much memory
  4816. * is used by this zone for memmap. This affects the watermark
  4817. * and per-cpu initialisations
  4818. */
  4819. memmap_pages = calc_memmap_size(size, realsize);
  4820. if (!is_highmem_idx(j)) {
  4821. if (freesize >= memmap_pages) {
  4822. freesize -= memmap_pages;
  4823. if (memmap_pages)
  4824. printk(KERN_DEBUG
  4825. " %s zone: %lu pages used for memmap\n",
  4826. zone_names[j], memmap_pages);
  4827. } else
  4828. pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
  4829. zone_names[j], memmap_pages, freesize);
  4830. }
  4831. /* Account for reserved pages */
  4832. if (j == 0 && freesize > dma_reserve) {
  4833. freesize -= dma_reserve;
  4834. printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
  4835. zone_names[0], dma_reserve);
  4836. }
  4837. if (!is_highmem_idx(j))
  4838. nr_kernel_pages += freesize;
  4839. /* Charge for highmem memmap if there are enough kernel pages */
  4840. else if (nr_kernel_pages > memmap_pages * 2)
  4841. nr_kernel_pages -= memmap_pages;
  4842. nr_all_pages += freesize;
  4843. /*
  4844. * Set an approximate value for lowmem here, it will be adjusted
  4845. * when the bootmem allocator frees pages into the buddy system.
  4846. * And all highmem pages will be managed by the buddy system.
  4847. */
  4848. zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
  4849. #ifdef CONFIG_NUMA
  4850. zone->node = nid;
  4851. zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
  4852. / 100;
  4853. zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
  4854. #endif
  4855. zone->name = zone_names[j];
  4856. spin_lock_init(&zone->lock);
  4857. spin_lock_init(&zone->lru_lock);
  4858. zone_seqlock_init(zone);
  4859. zone->zone_pgdat = pgdat;
  4860. zone_pcp_init(zone);
  4861. /* For bootup, initialized properly in watermark setup */
  4862. mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
  4863. lruvec_init(&zone->lruvec);
  4864. if (!size)
  4865. continue;
  4866. set_pageblock_order();
  4867. setup_usemap(pgdat, zone, zone_start_pfn, size);
  4868. ret = init_currently_empty_zone(zone, zone_start_pfn, size);
  4869. BUG_ON(ret);
  4870. memmap_init(size, nid, j, zone_start_pfn);
  4871. }
  4872. }
  4873. static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
  4874. {
  4875. unsigned long __maybe_unused start = 0;
  4876. unsigned long __maybe_unused offset = 0;
  4877. /* Skip empty nodes */
  4878. if (!pgdat->node_spanned_pages)
  4879. return;
  4880. #ifdef CONFIG_FLAT_NODE_MEM_MAP
  4881. start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
  4882. offset = pgdat->node_start_pfn - start;
  4883. /* ia64 gets its own node_mem_map, before this, without bootmem */
  4884. if (!pgdat->node_mem_map) {
  4885. unsigned long size, end;
  4886. struct page *map;
  4887. /*
  4888. * The zone's endpoints aren't required to be MAX_ORDER
4889. * aligned, but the node_mem_map endpoints must be, in order
  4890. * for the buddy allocator to function correctly.
  4891. */
  4892. end = pgdat_end_pfn(pgdat);
  4893. end = ALIGN(end, MAX_ORDER_NR_PAGES);
  4894. size = (end - start) * sizeof(struct page);
  4895. map = alloc_remap(pgdat->node_id, size);
  4896. if (!map)
  4897. map = memblock_virt_alloc_node_nopanic(size,
  4898. pgdat->node_id);
  4899. pgdat->node_mem_map = map + offset;
  4900. }
  4901. #ifndef CONFIG_NEED_MULTIPLE_NODES
  4902. /*
  4903. * With no DISCONTIG, the global mem_map is just set as node 0's
  4904. */
  4905. if (pgdat == NODE_DATA(0)) {
  4906. mem_map = NODE_DATA(0)->node_mem_map;
  4907. #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
  4908. if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
  4909. mem_map -= offset;
  4910. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  4911. }
  4912. #endif
  4913. #endif /* CONFIG_FLAT_NODE_MEM_MAP */
  4914. }
  4915. void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
  4916. unsigned long node_start_pfn, unsigned long *zholes_size)
  4917. {
  4918. pg_data_t *pgdat = NODE_DATA(nid);
  4919. unsigned long start_pfn = 0;
  4920. unsigned long end_pfn = 0;
  4921. /* pg_data_t should be reset to zero when it's allocated */
  4922. WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
  4923. reset_deferred_meminit(pgdat);
  4924. pgdat->node_id = nid;
  4925. pgdat->node_start_pfn = node_start_pfn;
  4926. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4927. get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
  4928. pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
  4929. (u64)start_pfn << PAGE_SHIFT,
  4930. end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
  4931. #else
  4932. start_pfn = node_start_pfn;
  4933. #endif
  4934. calculate_node_totalpages(pgdat, start_pfn, end_pfn,
  4935. zones_size, zholes_size);
  4936. alloc_node_mem_map(pgdat);
  4937. #ifdef CONFIG_FLAT_NODE_MEM_MAP
  4938. printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
  4939. nid, (unsigned long)pgdat,
  4940. (unsigned long)pgdat->node_mem_map);
  4941. #endif
  4942. free_area_init_core(pgdat);
  4943. }
  4944. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4945. #if MAX_NUMNODES > 1
  4946. /*
  4947. * Figure out the number of possible node ids.
  4948. */
  4949. void __init setup_nr_node_ids(void)
  4950. {
  4951. unsigned int highest;
  4952. highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
  4953. nr_node_ids = highest + 1;
  4954. }
  4955. #endif
  4956. /**
  4957. * node_map_pfn_alignment - determine the maximum internode alignment
  4958. *
  4959. * This function should be called after node map is populated and sorted.
  4960. * It calculates the maximum power of two alignment which can distinguish
  4961. * all the nodes.
  4962. *
  4963. * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
  4964. * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
4965. * nodes are shifted by 256MiB, the return value indicates 256MiB. Note that if only the last node is
  4966. * shifted, 1GiB is enough and this function will indicate so.
  4967. *
  4968. * This is used to test whether pfn -> nid mapping of the chosen memory
  4969. * model has fine enough granularity to avoid incorrect mapping for the
  4970. * populated node map.
  4971. *
4972. * Returns the determined alignment in pfns, or 0 if there is no alignment
  4973. * requirement (single node).
  4974. */
  4975. unsigned long __init node_map_pfn_alignment(void)
  4976. {
  4977. unsigned long accl_mask = 0, last_end = 0;
  4978. unsigned long start, end, mask;
  4979. int last_nid = -1;
  4980. int i, nid;
  4981. for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
  4982. if (!start || last_nid < 0 || last_nid == nid) {
  4983. last_nid = nid;
  4984. last_end = end;
  4985. continue;
  4986. }
  4987. /*
  4988. * Start with a mask granular enough to pin-point to the
  4989. * start pfn and tick off bits one-by-one until it becomes
  4990. * too coarse to separate the current node from the last.
  4991. */
  4992. mask = ~((1 << __ffs(start)) - 1);
  4993. while (mask && last_end <= (start & (mask << 1)))
  4994. mask <<= 1;
  4995. /* accumulate all internode masks */
  4996. accl_mask |= mask;
  4997. }
  4998. /* convert mask to number of pages */
  4999. return ~accl_mask + 1;
  5000. }
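/*
 * Worked example (assuming 4KiB pages): two 1GiB nodes both shifted up by
 * 256MiB, so the internode boundary sits at pfn 0x50000 (1GiB + 256MiB).
 * __ffs(0x50000) == 16, giving an initial 256MiB-granular mask; widening it
 * to 512MiB would round the boundary down below the previous node's end, so
 * the loop stops and the function returns 1 << 16 pfns, i.e. 256MiB, matching
 * the example in the comment above.
 */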
  5001. /* Find the lowest pfn for a node */
  5002. static unsigned long __init find_min_pfn_for_node(int nid)
  5003. {
  5004. unsigned long min_pfn = ULONG_MAX;
  5005. unsigned long start_pfn;
  5006. int i;
  5007. for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
  5008. min_pfn = min(min_pfn, start_pfn);
  5009. if (min_pfn == ULONG_MAX) {
  5010. pr_warn("Could not find start_pfn for node %d\n", nid);
  5011. return 0;
  5012. }
  5013. return min_pfn;
  5014. }
  5015. /**
  5016. * find_min_pfn_with_active_regions - Find the minimum PFN registered
  5017. *
  5018. * It returns the minimum PFN based on information provided via
  5019. * memblock_set_node().
  5020. */
  5021. unsigned long __init find_min_pfn_with_active_regions(void)
  5022. {
  5023. return find_min_pfn_for_node(MAX_NUMNODES);
  5024. }
  5025. /*
  5026. * early_calculate_totalpages()
  5027. * Sum pages in active regions for movable zone.
  5028. * Populate N_MEMORY for calculating usable_nodes.
  5029. */
  5030. static unsigned long __init early_calculate_totalpages(void)
  5031. {
  5032. unsigned long totalpages = 0;
  5033. unsigned long start_pfn, end_pfn;
  5034. int i, nid;
  5035. for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
  5036. unsigned long pages = end_pfn - start_pfn;
  5037. totalpages += pages;
  5038. if (pages)
  5039. node_set_state(nid, N_MEMORY);
  5040. }
  5041. return totalpages;
  5042. }
  5043. /*
  5044. * Find the PFN the Movable zone begins in each node. Kernel memory
  5045. * is spread evenly between nodes as long as the nodes have enough
  5046. * memory. When they don't, some nodes will have more kernelcore than
  5047. * others
  5048. */
  5049. static void __init find_zone_movable_pfns_for_nodes(void)
  5050. {
  5051. int i, nid;
  5052. unsigned long usable_startpfn;
  5053. unsigned long kernelcore_node, kernelcore_remaining;
5054. /* save the state before borrowing the nodemask */
  5055. nodemask_t saved_node_state = node_states[N_MEMORY];
  5056. unsigned long totalpages = early_calculate_totalpages();
  5057. int usable_nodes = nodes_weight(node_states[N_MEMORY]);
  5058. struct memblock_region *r;
  5059. /* Need to find movable_zone earlier when movable_node is specified. */
  5060. find_usable_zone_for_movable();
  5061. /*
  5062. * If movable_node is specified, ignore kernelcore and movablecore
  5063. * options.
  5064. */
  5065. if (movable_node_is_enabled()) {
  5066. for_each_memblock(memory, r) {
  5067. if (!memblock_is_hotpluggable(r))
  5068. continue;
  5069. nid = r->nid;
  5070. usable_startpfn = PFN_DOWN(r->base);
  5071. zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
  5072. min(usable_startpfn, zone_movable_pfn[nid]) :
  5073. usable_startpfn;
  5074. }
  5075. goto out2;
  5076. }
  5077. /*
  5078. * If kernelcore=mirror is specified, ignore movablecore option
  5079. */
  5080. if (mirrored_kernelcore) {
  5081. bool mem_below_4gb_not_mirrored = false;
  5082. for_each_memblock(memory, r) {
  5083. if (memblock_is_mirror(r))
  5084. continue;
  5085. nid = r->nid;
  5086. usable_startpfn = memblock_region_memory_base_pfn(r);
  5087. if (usable_startpfn < 0x100000) {
  5088. mem_below_4gb_not_mirrored = true;
  5089. continue;
  5090. }
  5091. zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
  5092. min(usable_startpfn, zone_movable_pfn[nid]) :
  5093. usable_startpfn;
  5094. }
  5095. if (mem_below_4gb_not_mirrored)
  5096. pr_warn("This configuration results in unmirrored kernel memory.");
  5097. goto out2;
  5098. }
  5099. /*
5100. * If movablecore=nn[KMG] was specified, calculate the size of
5101. * kernelcore it corresponds to, so that memory usable for
5102. * any allocation type is evenly spread. If both kernelcore
  5103. * and movablecore are specified, then the value of kernelcore
  5104. * will be used for required_kernelcore if it's greater than
  5105. * what movablecore would have allowed.
  5106. */
  5107. if (required_movablecore) {
  5108. unsigned long corepages;
  5109. /*
  5110. * Round-up so that ZONE_MOVABLE is at least as large as what
  5111. * was requested by the user
  5112. */
  5113. required_movablecore =
  5114. roundup(required_movablecore, MAX_ORDER_NR_PAGES);
  5115. required_movablecore = min(totalpages, required_movablecore);
  5116. corepages = totalpages - required_movablecore;
  5117. required_kernelcore = max(required_kernelcore, corepages);
  5118. }
  5119. /*
  5120. * If kernelcore was not specified or kernelcore size is larger
  5121. * than totalpages, there is no ZONE_MOVABLE.
  5122. */
  5123. if (!required_kernelcore || required_kernelcore >= totalpages)
  5124. goto out;
  5125. /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
  5126. usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
  5127. restart:
  5128. /* Spread kernelcore memory as evenly as possible throughout nodes */
  5129. kernelcore_node = required_kernelcore / usable_nodes;
  5130. for_each_node_state(nid, N_MEMORY) {
  5131. unsigned long start_pfn, end_pfn;
  5132. /*
  5133. * Recalculate kernelcore_node if the division per node
  5134. * now exceeds what is necessary to satisfy the requested
  5135. * amount of memory for the kernel
  5136. */
  5137. if (required_kernelcore < kernelcore_node)
  5138. kernelcore_node = required_kernelcore / usable_nodes;
  5139. /*
  5140. * As the map is walked, we track how much memory is usable
  5141. * by the kernel using kernelcore_remaining. When it is
  5142. * 0, the rest of the node is usable by ZONE_MOVABLE
  5143. */
  5144. kernelcore_remaining = kernelcore_node;
  5145. /* Go through each range of PFNs within this node */
  5146. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
  5147. unsigned long size_pages;
  5148. start_pfn = max(start_pfn, zone_movable_pfn[nid]);
  5149. if (start_pfn >= end_pfn)
  5150. continue;
  5151. /* Account for what is only usable for kernelcore */
  5152. if (start_pfn < usable_startpfn) {
  5153. unsigned long kernel_pages;
  5154. kernel_pages = min(end_pfn, usable_startpfn)
  5155. - start_pfn;
  5156. kernelcore_remaining -= min(kernel_pages,
  5157. kernelcore_remaining);
  5158. required_kernelcore -= min(kernel_pages,
  5159. required_kernelcore);
  5160. /* Continue if range is now fully accounted */
  5161. if (end_pfn <= usable_startpfn) {
  5162. /*
  5163. * Push zone_movable_pfn to the end so
  5164. * that if we have to rebalance
  5165. * kernelcore across nodes, we will
  5166. * not double account here
  5167. */
  5168. zone_movable_pfn[nid] = end_pfn;
  5169. continue;
  5170. }
  5171. start_pfn = usable_startpfn;
  5172. }
  5173. /*
  5174. * The usable PFN range for ZONE_MOVABLE is from
  5175. * start_pfn->end_pfn. Calculate size_pages as the
  5176. * number of pages used as kernelcore
  5177. */
  5178. size_pages = end_pfn - start_pfn;
  5179. if (size_pages > kernelcore_remaining)
  5180. size_pages = kernelcore_remaining;
  5181. zone_movable_pfn[nid] = start_pfn + size_pages;
  5182. /*
  5183. * Some kernelcore has been met, update counts and
  5184. * break if the kernelcore for this node has been
  5185. * satisfied
  5186. */
  5187. required_kernelcore -= min(required_kernelcore,
  5188. size_pages);
  5189. kernelcore_remaining -= size_pages;
  5190. if (!kernelcore_remaining)
  5191. break;
  5192. }
  5193. }
  5194. /*
  5195. * If there is still required_kernelcore, we do another pass with one
  5196. * less node in the count. This will push zone_movable_pfn[nid] further
  5197. * along on the nodes that still have memory until kernelcore is
  5198. * satisfied
  5199. */
  5200. usable_nodes--;
  5201. if (usable_nodes && required_kernelcore > usable_nodes)
  5202. goto restart;
  5203. out2:
  5204. /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
  5205. for (nid = 0; nid < MAX_NUMNODES; nid++)
  5206. zone_movable_pfn[nid] =
  5207. roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
  5208. out:
  5209. /* restore the node_state */
  5210. node_states[N_MEMORY] = saved_node_state;
  5211. }
/* Any regular or high memory on that node? */
  5213. static void check_for_memory(pg_data_t *pgdat, int nid)
  5214. {
  5215. enum zone_type zone_type;
  5216. if (N_MEMORY == N_NORMAL_MEMORY)
  5217. return;
  5218. for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
  5219. struct zone *zone = &pgdat->node_zones[zone_type];
  5220. if (populated_zone(zone)) {
  5221. node_set_state(nid, N_HIGH_MEMORY);
  5222. if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
  5223. zone_type <= ZONE_NORMAL)
  5224. node_set_state(nid, N_NORMAL_MEMORY);
  5225. break;
  5226. }
  5227. }
  5228. }
  5229. /**
  5230. * free_area_init_nodes - Initialise all pg_data_t and zone data
  5231. * @max_zone_pfn: an array of max PFNs for each zone
  5232. *
  5233. * This will call free_area_init_node() for each active node in the system.
  5234. * Using the page ranges provided by memblock_set_node(), the size of each
* zone in each node and their holes is calculated. If the maximum PFNs
* of two adjacent zones match, it is assumed that the zone is empty.
  5237. * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
  5238. * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
  5239. * starts where the previous one ended. For example, ZONE_DMA32 starts
  5240. * at arch_max_dma_pfn.
  5241. */
  5242. void __init free_area_init_nodes(unsigned long *max_zone_pfn)
  5243. {
  5244. unsigned long start_pfn, end_pfn;
  5245. int i, nid;
  5246. /* Record where the zone boundaries are */
  5247. memset(arch_zone_lowest_possible_pfn, 0,
  5248. sizeof(arch_zone_lowest_possible_pfn));
  5249. memset(arch_zone_highest_possible_pfn, 0,
  5250. sizeof(arch_zone_highest_possible_pfn));
  5251. arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
  5252. arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
  5253. for (i = 1; i < MAX_NR_ZONES; i++) {
  5254. if (i == ZONE_MOVABLE)
  5255. continue;
  5256. arch_zone_lowest_possible_pfn[i] =
  5257. arch_zone_highest_possible_pfn[i-1];
  5258. arch_zone_highest_possible_pfn[i] =
  5259. max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
  5260. }
  5261. arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
  5262. arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
  5263. /* Find the PFNs that ZONE_MOVABLE begins at in each node */
  5264. memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
  5265. find_zone_movable_pfns_for_nodes();
  5266. /* Print out the zone ranges */
  5267. pr_info("Zone ranges:\n");
  5268. for (i = 0; i < MAX_NR_ZONES; i++) {
  5269. if (i == ZONE_MOVABLE)
  5270. continue;
  5271. pr_info(" %-8s ", zone_names[i]);
  5272. if (arch_zone_lowest_possible_pfn[i] ==
  5273. arch_zone_highest_possible_pfn[i])
  5274. pr_cont("empty\n");
  5275. else
  5276. pr_cont("[mem %#018Lx-%#018Lx]\n",
  5277. (u64)arch_zone_lowest_possible_pfn[i]
  5278. << PAGE_SHIFT,
  5279. ((u64)arch_zone_highest_possible_pfn[i]
  5280. << PAGE_SHIFT) - 1);
  5281. }
  5282. /* Print out the PFNs ZONE_MOVABLE begins at in each node */
  5283. pr_info("Movable zone start for each node\n");
  5284. for (i = 0; i < MAX_NUMNODES; i++) {
  5285. if (zone_movable_pfn[i])
  5286. pr_info(" Node %d: %#018Lx\n", i,
  5287. (u64)zone_movable_pfn[i] << PAGE_SHIFT);
  5288. }
  5289. /* Print out the early node map */
  5290. pr_info("Early memory node ranges\n");
  5291. for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
  5292. pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
  5293. (u64)start_pfn << PAGE_SHIFT,
  5294. ((u64)end_pfn << PAGE_SHIFT) - 1);
  5295. /* Initialise every node */
  5296. mminit_verify_pageflags_layout();
  5297. setup_nr_node_ids();
  5298. for_each_online_node(nid) {
  5299. pg_data_t *pgdat = NODE_DATA(nid);
  5300. free_area_init_node(nid, NULL,
  5301. find_min_pfn_for_node(nid), NULL);
  5302. /* Any memory on that node */
  5303. if (pgdat->node_present_pages)
  5304. node_set_state(nid, N_MEMORY);
  5305. check_for_memory(pgdat, nid);
  5306. }
  5307. }
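/*
 * Usage sketch (illustrative; the exact zone limits are architecture
 * specific): arch setup code fills a max_zone_pfn array and hands it to
 * free_area_init_nodes(), along the lines of
 *
 *	unsigned long max_zone_pfns[MAX_NR_ZONES];
 *
 *	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 *	max_zone_pfns[ZONE_DMA]    = MAX_DMA_PFN;
 *	max_zone_pfns[ZONE_DMA32]  = MAX_DMA32_PFN;
 *	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 *	free_area_init_nodes(max_zone_pfns);
 *
 * ZONE_MOVABLE is left at zero; its per-node start PFNs are computed by
 * find_zone_movable_pfns_for_nodes() above.
 */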
  5308. static int __init cmdline_parse_core(char *p, unsigned long *core)
  5309. {
  5310. unsigned long long coremem;
  5311. if (!p)
  5312. return -EINVAL;
  5313. coremem = memparse(p, &p);
  5314. *core = coremem >> PAGE_SHIFT;
  5315. /* Paranoid check that UL is enough for the coremem value */
  5316. WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
  5317. return 0;
  5318. }
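/*
 * For example (illustrative): with 4K pages, "kernelcore=512M" is turned by
 * memparse() into 536870912 bytes, which this helper converts to
 * 536870912 >> PAGE_SHIFT = 131072 pages.
 */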
  5319. /*
  5320. * kernelcore=size sets the amount of memory for use for allocations that
  5321. * cannot be reclaimed or migrated.
  5322. */
  5323. static int __init cmdline_parse_kernelcore(char *p)
  5324. {
  5325. /* parse kernelcore=mirror */
  5326. if (parse_option_str(p, "mirror")) {
  5327. mirrored_kernelcore = true;
  5328. return 0;
  5329. }
  5330. return cmdline_parse_core(p, &required_kernelcore);
  5331. }
  5332. /*
  5333. * movablecore=size sets the amount of memory for use for allocations that
  5334. * can be reclaimed or migrated.
  5335. */
  5336. static int __init cmdline_parse_movablecore(char *p)
  5337. {
  5338. return cmdline_parse_core(p, &required_movablecore);
  5339. }
  5340. early_param("kernelcore", cmdline_parse_kernelcore);
  5341. early_param("movablecore", cmdline_parse_movablecore);
  5342. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  5343. void adjust_managed_page_count(struct page *page, long count)
  5344. {
  5345. spin_lock(&managed_page_count_lock);
  5346. page_zone(page)->managed_pages += count;
  5347. totalram_pages += count;
  5348. #ifdef CONFIG_HIGHMEM
  5349. if (PageHighMem(page))
  5350. totalhigh_pages += count;
  5351. #endif
  5352. spin_unlock(&managed_page_count_lock);
  5353. }
  5354. EXPORT_SYMBOL(adjust_managed_page_count);
  5355. unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
  5356. {
  5357. void *pos;
  5358. unsigned long pages = 0;
  5359. start = (void *)PAGE_ALIGN((unsigned long)start);
  5360. end = (void *)((unsigned long)end & PAGE_MASK);
  5361. for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
  5362. if ((unsigned int)poison <= 0xFF)
  5363. memset(pos, poison, PAGE_SIZE);
  5364. free_reserved_page(virt_to_page(pos));
  5365. }
  5366. if (pages && s)
  5367. pr_info("Freeing %s memory: %ldK (%p - %p)\n",
  5368. s, pages << (PAGE_SHIFT - 10), start, end);
  5369. return pages;
  5370. }
  5371. EXPORT_SYMBOL(free_reserved_area);
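/*
 * Typical usage (sketch; the section symbols are the usual linker-provided
 * ones): architectures release their init sections with something like
 *
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 *
 * which optionally poisons each page, returns it to the buddy allocator via
 * free_reserved_page() and reports the number of pages freed.
 */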
  5372. #ifdef CONFIG_HIGHMEM
  5373. void free_highmem_page(struct page *page)
  5374. {
  5375. __free_reserved_page(page);
  5376. totalram_pages++;
  5377. page_zone(page)->managed_pages++;
  5378. totalhigh_pages++;
  5379. }
  5380. #endif
  5381. void __init mem_init_print_info(const char *str)
  5382. {
  5383. unsigned long physpages, codesize, datasize, rosize, bss_size;
  5384. unsigned long init_code_size, init_data_size;
  5385. physpages = get_num_physpages();
  5386. codesize = _etext - _stext;
  5387. datasize = _edata - _sdata;
  5388. rosize = __end_rodata - __start_rodata;
  5389. bss_size = __bss_stop - __bss_start;
  5390. init_data_size = __init_end - __init_begin;
  5391. init_code_size = _einittext - _sinittext;
  5392. /*
  5393. * Detect special cases and adjust section sizes accordingly:
  5394. * 1) .init.* may be embedded into .data sections
  5395. * 2) .init.text.* may be out of [__init_begin, __init_end],
  5396. * please refer to arch/tile/kernel/vmlinux.lds.S.
  5397. * 3) .rodata.* may be embedded into .text or .data sections.
  5398. */
  5399. #define adj_init_size(start, end, size, pos, adj) \
  5400. do { \
  5401. if (start <= pos && pos < end && size > adj) \
  5402. size -= adj; \
  5403. } while (0)
  5404. adj_init_size(__init_begin, __init_end, init_data_size,
  5405. _sinittext, init_code_size);
  5406. adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
  5407. adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
  5408. adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
  5409. adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
  5410. #undef adj_init_size
  5411. pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
  5412. #ifdef CONFIG_HIGHMEM
  5413. ", %luK highmem"
  5414. #endif
  5415. "%s%s)\n",
  5416. nr_free_pages() << (PAGE_SHIFT - 10),
  5417. physpages << (PAGE_SHIFT - 10),
  5418. codesize >> 10, datasize >> 10, rosize >> 10,
  5419. (init_data_size + init_code_size) >> 10, bss_size >> 10,
  5420. (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
  5421. totalcma_pages << (PAGE_SHIFT - 10),
  5422. #ifdef CONFIG_HIGHMEM
  5423. totalhigh_pages << (PAGE_SHIFT - 10),
  5424. #endif
  5425. str ? ", " : "", str ? str : "");
  5426. }
  5427. /**
  5428. * set_dma_reserve - set the specified number of pages reserved in the first zone
  5429. * @new_dma_reserve: The number of pages to mark reserved
  5430. *
  5431. * The per-cpu batchsize and zone watermarks are determined by managed_pages.
  5432. * In the DMA zone, a significant percentage may be consumed by kernel image
  5433. * and other unfreeable allocations which can skew the watermarks badly. This
  5434. * function may optionally be used to account for unfreeable pages in the
  5435. * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
  5436. * smaller per-cpu batchsize.
  5437. */
  5438. void __init set_dma_reserve(unsigned long new_dma_reserve)
  5439. {
  5440. dma_reserve = new_dma_reserve;
  5441. }
  5442. void __init free_area_init(unsigned long *zones_size)
  5443. {
  5444. free_area_init_node(0, zones_size,
  5445. __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
  5446. }
  5447. static int page_alloc_cpu_notify(struct notifier_block *self,
  5448. unsigned long action, void *hcpu)
  5449. {
  5450. int cpu = (unsigned long)hcpu;
  5451. if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
  5452. lru_add_drain_cpu(cpu);
  5453. drain_pages(cpu);
  5454. /*
  5455. * Spill the event counters of the dead processor
  5456. * into the current processors event counters.
  5457. * This artificially elevates the count of the current
  5458. * processor.
  5459. */
  5460. vm_events_fold_cpu(cpu);
  5461. /*
  5462. * Zero the differential counters of the dead processor
  5463. * so that the vm statistics are consistent.
  5464. *
  5465. * This is only okay since the processor is dead and cannot
  5466. * race with what we are doing.
  5467. */
  5468. cpu_vm_stats_fold(cpu);
  5469. }
  5470. return NOTIFY_OK;
  5471. }
  5472. void __init page_alloc_init(void)
  5473. {
  5474. hotcpu_notifier(page_alloc_cpu_notify, 0);
  5475. }
  5476. /*
  5477. * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
  5478. * or min_free_kbytes changes.
  5479. */
  5480. static void calculate_totalreserve_pages(void)
  5481. {
  5482. struct pglist_data *pgdat;
  5483. unsigned long reserve_pages = 0;
  5484. enum zone_type i, j;
  5485. for_each_online_pgdat(pgdat) {
  5486. for (i = 0; i < MAX_NR_ZONES; i++) {
  5487. struct zone *zone = pgdat->node_zones + i;
  5488. long max = 0;
  5489. /* Find valid and maximum lowmem_reserve in the zone */
  5490. for (j = i; j < MAX_NR_ZONES; j++) {
  5491. if (zone->lowmem_reserve[j] > max)
  5492. max = zone->lowmem_reserve[j];
  5493. }
  5494. /* we treat the high watermark as reserved pages. */
  5495. max += high_wmark_pages(zone);
  5496. if (max > zone->managed_pages)
  5497. max = zone->managed_pages;
  5498. zone->totalreserve_pages = max;
  5499. reserve_pages += max;
  5500. }
  5501. }
  5502. totalreserve_pages = reserve_pages;
  5503. }
  5504. /*
  5505. * setup_per_zone_lowmem_reserve - called whenever
  5506. * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
  5507. * has a correct pages reserved value, so an adequate number of
  5508. * pages are left in the zone after a successful __alloc_pages().
  5509. */
  5510. static void setup_per_zone_lowmem_reserve(void)
  5511. {
  5512. struct pglist_data *pgdat;
  5513. enum zone_type j, idx;
  5514. for_each_online_pgdat(pgdat) {
  5515. for (j = 0; j < MAX_NR_ZONES; j++) {
  5516. struct zone *zone = pgdat->node_zones + j;
  5517. unsigned long managed_pages = zone->managed_pages;
  5518. zone->lowmem_reserve[j] = 0;
  5519. idx = j;
  5520. while (idx) {
  5521. struct zone *lower_zone;
  5522. idx--;
  5523. if (sysctl_lowmem_reserve_ratio[idx] < 1)
  5524. sysctl_lowmem_reserve_ratio[idx] = 1;
  5525. lower_zone = pgdat->node_zones + idx;
  5526. lower_zone->lowmem_reserve[j] = managed_pages /
  5527. sysctl_lowmem_reserve_ratio[idx];
  5528. managed_pages += lower_zone->managed_pages;
  5529. }
  5530. }
  5531. }
  5532. /* update totalreserve_pages */
  5533. calculate_totalreserve_pages();
  5534. }
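/*
 * Worked example (hypothetical sizes, ignoring ZONE_DMA32): with the default
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] = 256 and a ZONE_NORMAL of
 * 1,000,000 managed pages, ZONE_DMA ends up with
 * lowmem_reserve[ZONE_NORMAL] = 1,000,000 / 256 ~= 3906 pages, so a
 * GFP_KERNEL allocation may only fall back to ZONE_DMA while that zone
 * still has about 3906 free pages above its watermark.
 */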
  5535. static void __setup_per_zone_wmarks(void)
  5536. {
  5537. unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
  5538. unsigned long lowmem_pages = 0;
  5539. struct zone *zone;
  5540. unsigned long flags;
  5541. /* Calculate total number of !ZONE_HIGHMEM pages */
  5542. for_each_zone(zone) {
  5543. if (!is_highmem(zone))
  5544. lowmem_pages += zone->managed_pages;
  5545. }
  5546. for_each_zone(zone) {
  5547. u64 tmp;
  5548. spin_lock_irqsave(&zone->lock, flags);
  5549. tmp = (u64)pages_min * zone->managed_pages;
  5550. do_div(tmp, lowmem_pages);
  5551. if (is_highmem(zone)) {
  5552. /*
  5553. * __GFP_HIGH and PF_MEMALLOC allocations usually don't
  5554. * need highmem pages, so cap pages_min to a small
  5555. * value here.
  5556. *
  5557. * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
* deltas control async page reclaim, and so should
  5559. * not be capped for highmem.
  5560. */
  5561. unsigned long min_pages;
  5562. min_pages = zone->managed_pages / 1024;
  5563. min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
  5564. zone->watermark[WMARK_MIN] = min_pages;
  5565. } else {
  5566. /*
  5567. * If it's a lowmem zone, reserve a number of pages
  5568. * proportionate to the zone's size.
  5569. */
  5570. zone->watermark[WMARK_MIN] = tmp;
  5571. }
  5572. /*
  5573. * Set the kswapd watermarks distance according to the
  5574. * scale factor in proportion to available memory, but
  5575. * ensure a minimum size on small systems.
  5576. */
  5577. tmp = max_t(u64, tmp >> 2,
  5578. mult_frac(zone->managed_pages,
  5579. watermark_scale_factor, 10000));
  5580. zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
  5581. zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
  5582. __mod_zone_page_state(zone, NR_ALLOC_BATCH,
  5583. high_wmark_pages(zone) - low_wmark_pages(zone) -
  5584. atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
  5585. spin_unlock_irqrestore(&zone->lock, flags);
  5586. }
  5587. /* update totalreserve_pages */
  5588. calculate_totalreserve_pages();
  5589. }
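/*
 * Worked example (hypothetical numbers): with min_free_kbytes = 4096 and 4K
 * pages, pages_min = 1024. A lowmem zone holding half of all lowmem gets
 * WMARK_MIN = 512. With the default watermark_scale_factor of 10 the kswapd
 * gap is max(512 / 4, managed_pages * 10 / 10000); for a 500,000-page zone
 * that is max(128, 500) = 500, giving WMARK_LOW = 1012 and WMARK_HIGH = 1512.
 */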
  5590. /**
  5591. * setup_per_zone_wmarks - called when min_free_kbytes changes
  5592. * or when memory is hot-{added|removed}
  5593. *
  5594. * Ensures that the watermark[min,low,high] values for each zone are set
  5595. * correctly with respect to min_free_kbytes.
  5596. */
  5597. void setup_per_zone_wmarks(void)
  5598. {
  5599. mutex_lock(&zonelists_mutex);
  5600. __setup_per_zone_wmarks();
  5601. mutex_unlock(&zonelists_mutex);
  5602. }
  5603. /*
  5604. * The inactive anon list should be small enough that the VM never has to
  5605. * do too much work, but large enough that each inactive page has a chance
  5606. * to be referenced again before it is swapped out.
  5607. *
  5608. * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
  5609. * INACTIVE_ANON pages on this zone's LRU, maintained by the
  5610. * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
  5611. * the anonymous pages are kept on the inactive list.
  5612. *
  5613. * total target max
  5614. * memory ratio inactive anon
  5615. * -------------------------------------
  5616. * 10MB 1 5MB
  5617. * 100MB 1 50MB
  5618. * 1GB 3 250MB
  5619. * 10GB 10 0.9GB
  5620. * 100GB 31 3GB
  5621. * 1TB 101 10GB
  5622. * 10TB 320 32GB
  5623. */
  5624. static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
  5625. {
  5626. unsigned int gb, ratio;
  5627. /* Zone size in gigabytes */
  5628. gb = zone->managed_pages >> (30 - PAGE_SHIFT);
  5629. if (gb)
  5630. ratio = int_sqrt(10 * gb);
  5631. else
  5632. ratio = 1;
  5633. zone->inactive_ratio = ratio;
  5634. }
  5635. static void __meminit setup_per_zone_inactive_ratio(void)
  5636. {
  5637. struct zone *zone;
  5638. for_each_zone(zone)
  5639. calculate_zone_inactive_ratio(zone);
  5640. }
  5641. /*
  5642. * Initialise min_free_kbytes.
  5643. *
  5644. * For small machines we want it small (128k min). For large machines
  5645. * we want it large (64MB max). But it is not linear, because network
  5646. * bandwidth does not increase linearly with machine size. We use
  5647. *
  5648. * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
  5649. * min_free_kbytes = sqrt(lowmem_kbytes * 16)
  5650. *
  5651. * which yields
  5652. *
  5653. * 16MB: 512k
  5654. * 32MB: 724k
  5655. * 64MB: 1024k
  5656. * 128MB: 1448k
  5657. * 256MB: 2048k
  5658. * 512MB: 2896k
  5659. * 1024MB: 4096k
  5660. * 2048MB: 5792k
  5661. * 4096MB: 8192k
  5662. * 8192MB: 11584k
  5663. * 16384MB: 16384k
  5664. */
  5665. int __meminit init_per_zone_wmark_min(void)
  5666. {
  5667. unsigned long lowmem_kbytes;
  5668. int new_min_free_kbytes;
  5669. lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
  5670. new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
  5671. if (new_min_free_kbytes > user_min_free_kbytes) {
  5672. min_free_kbytes = new_min_free_kbytes;
  5673. if (min_free_kbytes < 128)
  5674. min_free_kbytes = 128;
  5675. if (min_free_kbytes > 65536)
  5676. min_free_kbytes = 65536;
  5677. } else {
  5678. pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
  5679. new_min_free_kbytes, user_min_free_kbytes);
  5680. }
  5681. setup_per_zone_wmarks();
  5682. refresh_zone_stat_thresholds();
  5683. setup_per_zone_lowmem_reserve();
  5684. setup_per_zone_inactive_ratio();
  5685. return 0;
  5686. }
  5687. core_initcall(init_per_zone_wmark_min)
  5688. /*
  5689. * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
  5690. * that we can call two helper functions whenever min_free_kbytes
  5691. * changes.
  5692. */
  5693. int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
  5694. void __user *buffer, size_t *length, loff_t *ppos)
  5695. {
  5696. int rc;
  5697. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5698. if (rc)
  5699. return rc;
  5700. if (write) {
  5701. user_min_free_kbytes = min_free_kbytes;
  5702. setup_per_zone_wmarks();
  5703. }
  5704. return 0;
  5705. }
  5706. int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
  5707. void __user *buffer, size_t *length, loff_t *ppos)
  5708. {
  5709. int rc;
  5710. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5711. if (rc)
  5712. return rc;
  5713. if (write)
  5714. setup_per_zone_wmarks();
  5715. return 0;
  5716. }
  5717. #ifdef CONFIG_NUMA
  5718. int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
  5719. void __user *buffer, size_t *length, loff_t *ppos)
  5720. {
  5721. struct zone *zone;
  5722. int rc;
  5723. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5724. if (rc)
  5725. return rc;
  5726. for_each_zone(zone)
  5727. zone->min_unmapped_pages = (zone->managed_pages *
  5728. sysctl_min_unmapped_ratio) / 100;
  5729. return 0;
  5730. }
  5731. int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
  5732. void __user *buffer, size_t *length, loff_t *ppos)
  5733. {
  5734. struct zone *zone;
  5735. int rc;
  5736. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5737. if (rc)
  5738. return rc;
  5739. for_each_zone(zone)
  5740. zone->min_slab_pages = (zone->managed_pages *
  5741. sysctl_min_slab_ratio) / 100;
  5742. return 0;
  5743. }
  5744. #endif
  5745. /*
  5746. * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
  5747. * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
  5748. * whenever sysctl_lowmem_reserve_ratio changes.
  5749. *
* The reserve ratio has no relation to the minimum watermarks. The
* lowmem reserve ratio only makes sense in relation to the boot-time
* zone sizes.
  5753. */
  5754. int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
  5755. void __user *buffer, size_t *length, loff_t *ppos)
  5756. {
  5757. proc_dointvec_minmax(table, write, buffer, length, ppos);
  5758. setup_per_zone_lowmem_reserve();
  5759. return 0;
  5760. }
  5761. /*
  5762. * percpu_pagelist_fraction - changes the pcp->high for each zone on each
* cpu. It is the fraction of total pages in each zone that a hot per-cpu
* pagelist can have before it gets flushed back to the buddy allocator.
  5765. */
  5766. int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
  5767. void __user *buffer, size_t *length, loff_t *ppos)
  5768. {
  5769. struct zone *zone;
  5770. int old_percpu_pagelist_fraction;
  5771. int ret;
  5772. mutex_lock(&pcp_batch_high_lock);
  5773. old_percpu_pagelist_fraction = percpu_pagelist_fraction;
  5774. ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5775. if (!write || ret < 0)
  5776. goto out;
  5777. /* Sanity checking to avoid pcp imbalance */
  5778. if (percpu_pagelist_fraction &&
  5779. percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
  5780. percpu_pagelist_fraction = old_percpu_pagelist_fraction;
  5781. ret = -EINVAL;
  5782. goto out;
  5783. }
  5784. /* No change? */
  5785. if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
  5786. goto out;
  5787. for_each_populated_zone(zone) {
  5788. unsigned int cpu;
  5789. for_each_possible_cpu(cpu)
  5790. pageset_set_high_and_batch(zone,
  5791. per_cpu_ptr(zone->pageset, cpu));
  5792. }
  5793. out:
  5794. mutex_unlock(&pcp_batch_high_lock);
  5795. return ret;
  5796. }
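/*
 * Example (illustrative): writing 8 to /proc/sys/vm/percpu_pagelist_fraction
 * sets pcp->high for each zone to zone->managed_pages / 8, so on a
 * 1,000,000-page zone each CPU may hold up to ~125,000 pages on its per-cpu
 * list before pages are drained back to the buddy allocator.
 */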
  5797. #ifdef CONFIG_NUMA
  5798. int hashdist = HASHDIST_DEFAULT;
  5799. static int __init set_hashdist(char *str)
  5800. {
  5801. if (!str)
  5802. return 0;
  5803. hashdist = simple_strtoul(str, &str, 0);
  5804. return 1;
  5805. }
  5806. __setup("hashdist=", set_hashdist);
  5807. #endif
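/*
 * Example (illustrative): booting with "hashdist=0" makes the large system
 * hash tables below (those not allocated with HASH_EARLY) come from
 * alloc_pages_exact() instead of being spread across nodes via vmalloc().
 */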
  5808. /*
  5809. * allocate a large system hash table from bootmem
  5810. * - it is assumed that the hash table must contain an exact power-of-2
  5811. * quantity of entries
  5812. * - limit is the number of hash buckets, not the total allocation size
  5813. */
  5814. void *__init alloc_large_system_hash(const char *tablename,
  5815. unsigned long bucketsize,
  5816. unsigned long numentries,
  5817. int scale,
  5818. int flags,
  5819. unsigned int *_hash_shift,
  5820. unsigned int *_hash_mask,
  5821. unsigned long low_limit,
  5822. unsigned long high_limit)
  5823. {
  5824. unsigned long long max = high_limit;
  5825. unsigned long log2qty, size;
  5826. void *table = NULL;
  5827. /* allow the kernel cmdline to have a say */
  5828. if (!numentries) {
  5829. /* round applicable memory size up to nearest megabyte */
  5830. numentries = nr_kernel_pages;
  5831. /* It isn't necessary when PAGE_SIZE >= 1MB */
  5832. if (PAGE_SHIFT < 20)
  5833. numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
  5834. /* limit to 1 bucket per 2^scale bytes of low memory */
  5835. if (scale > PAGE_SHIFT)
  5836. numentries >>= (scale - PAGE_SHIFT);
  5837. else
  5838. numentries <<= (PAGE_SHIFT - scale);
/* Make sure we've got at least a 0-order allocation. */
  5840. if (unlikely(flags & HASH_SMALL)) {
  5841. /* Makes no sense without HASH_EARLY */
  5842. WARN_ON(!(flags & HASH_EARLY));
  5843. if (!(numentries >> *_hash_shift)) {
  5844. numentries = 1UL << *_hash_shift;
  5845. BUG_ON(!numentries);
  5846. }
  5847. } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
  5848. numentries = PAGE_SIZE / bucketsize;
  5849. }
  5850. numentries = roundup_pow_of_two(numentries);
  5851. /* limit allocation size to 1/16 total memory by default */
  5852. if (max == 0) {
  5853. max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
  5854. do_div(max, bucketsize);
  5855. }
  5856. max = min(max, 0x80000000ULL);
  5857. if (numentries < low_limit)
  5858. numentries = low_limit;
  5859. if (numentries > max)
  5860. numentries = max;
  5861. log2qty = ilog2(numentries);
  5862. do {
  5863. size = bucketsize << log2qty;
  5864. if (flags & HASH_EARLY)
  5865. table = memblock_virt_alloc_nopanic(size, 0);
  5866. else if (hashdist)
  5867. table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
  5868. else {
  5869. /*
  5870. * If bucketsize is not a power-of-two, we may free
* some pages at the end of the hash table, which
* alloc_pages_exact() does automatically.
  5873. */
  5874. if (get_order(size) < MAX_ORDER) {
  5875. table = alloc_pages_exact(size, GFP_ATOMIC);
  5876. kmemleak_alloc(table, size, 1, GFP_ATOMIC);
  5877. }
  5878. }
  5879. } while (!table && size > PAGE_SIZE && --log2qty);
  5880. if (!table)
  5881. panic("Failed to allocate %s hash table\n", tablename);
  5882. pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
  5883. tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
  5884. if (_hash_shift)
  5885. *_hash_shift = log2qty;
  5886. if (_hash_mask)
  5887. *_hash_mask = (1 << log2qty) - 1;
  5888. return table;
  5889. }
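/*
 * Usage sketch (parameters are illustrative): early callers such as the
 * inode cache size their tables with something along the lines of
 *
 *	inode_hashtable = alloc_large_system_hash("Inode-cache",
 *						  sizeof(struct hlist_head),
 *						  ihash_entries, 14, HASH_EARLY,
 *						  &i_hash_shift, &i_hash_mask,
 *						  0, 0);
 *
 * where scale = 14 asks for roughly one bucket per 16K of low memory and
 * the returned shift/mask are used to index into the table.
 */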
  5890. /* Return a pointer to the bitmap storing bits affecting a block of pages */
  5891. static inline unsigned long *get_pageblock_bitmap(struct page *page,
  5892. unsigned long pfn)
  5893. {
  5894. #ifdef CONFIG_SPARSEMEM
  5895. return __pfn_to_section(pfn)->pageblock_flags;
  5896. #else
  5897. return page_zone(page)->pageblock_flags;
  5898. #endif /* CONFIG_SPARSEMEM */
  5899. }
  5900. static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
  5901. {
  5902. #ifdef CONFIG_SPARSEMEM
  5903. pfn &= (PAGES_PER_SECTION-1);
  5904. return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
  5905. #else
  5906. pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
  5907. return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
  5908. #endif /* CONFIG_SPARSEMEM */
  5909. }
  5910. /**
  5911. * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
  5912. * @page: The page within the block of interest
  5913. * @pfn: The target page frame number
  5914. * @end_bitidx: The last bit of interest to retrieve
  5915. * @mask: mask of bits that the caller is interested in
  5916. *
  5917. * Return: pageblock_bits flags
  5918. */
  5919. unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
  5920. unsigned long end_bitidx,
  5921. unsigned long mask)
  5922. {
  5923. unsigned long *bitmap;
  5924. unsigned long bitidx, word_bitidx;
  5925. unsigned long word;
  5926. bitmap = get_pageblock_bitmap(page, pfn);
  5927. bitidx = pfn_to_bitidx(page, pfn);
  5928. word_bitidx = bitidx / BITS_PER_LONG;
  5929. bitidx &= (BITS_PER_LONG-1);
  5930. word = bitmap[word_bitidx];
  5931. bitidx += end_bitidx;
  5932. return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
  5933. }
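/*
 * Illustrative example (assuming the zone starts at pfn 0): with
 * NR_PAGEBLOCK_BITS = 4 and pageblock_order = 9 (2M pageblocks on 4K
 * pages), pfn 0x2400 lies in pageblock 18, so its flags start at bit
 * 18 * 4 = 72 of the bitmap, i.e. word_bitidx = 1 and bitidx = 8 on a
 * 64-bit host.
 */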
  5934. /**
  5935. * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
  5936. * @page: The page within the block of interest
  5937. * @flags: The flags to set
  5938. * @pfn: The target page frame number
  5939. * @end_bitidx: The last bit of interest
  5940. * @mask: mask of bits that the caller is interested in
  5941. */
  5942. void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
  5943. unsigned long pfn,
  5944. unsigned long end_bitidx,
  5945. unsigned long mask)
  5946. {
  5947. unsigned long *bitmap;
  5948. unsigned long bitidx, word_bitidx;
  5949. unsigned long old_word, word;
  5950. BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
  5951. bitmap = get_pageblock_bitmap(page, pfn);
  5952. bitidx = pfn_to_bitidx(page, pfn);
  5953. word_bitidx = bitidx / BITS_PER_LONG;
  5954. bitidx &= (BITS_PER_LONG-1);
  5955. VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);
  5956. bitidx += end_bitidx;
  5957. mask <<= (BITS_PER_LONG - bitidx - 1);
  5958. flags <<= (BITS_PER_LONG - bitidx - 1);
  5959. word = READ_ONCE(bitmap[word_bitidx]);
  5960. for (;;) {
  5961. old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
  5962. if (word == old_word)
  5963. break;
  5964. word = old_word;
  5965. }
  5966. }
  5967. /*
* This function checks whether the pageblock includes unmovable pages or not.
* If @count is not zero, it is okay to include fewer than @count unmovable
* pages.
*
* A PageLRU check without isolation or lru_lock could race, so a
* MIGRATE_MOVABLE block might include unmovable pages. This means the
* function cannot be expected to be exact.
  5974. */
  5975. bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
  5976. bool skip_hwpoisoned_pages)
  5977. {
  5978. unsigned long pfn, iter, found;
  5979. int mt;
  5980. /*
* To avoid noisy data, lru_add_drain_all() should be called beforehand.
* If this is ZONE_MOVABLE, the zone never contains unmovable pages.
  5983. */
  5984. if (zone_idx(zone) == ZONE_MOVABLE)
  5985. return false;
  5986. mt = get_pageblock_migratetype(page);
  5987. if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
  5988. return false;
  5989. pfn = page_to_pfn(page);
  5990. for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
  5991. unsigned long check = pfn + iter;
  5992. if (!pfn_valid_within(check))
  5993. continue;
  5994. page = pfn_to_page(check);
  5995. /*
  5996. * Hugepages are not in LRU lists, but they're movable.
* We need not scan over tail pages because we don't
  5998. * handle each tail page individually in migration.
  5999. */
  6000. if (PageHuge(page)) {
  6001. iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
  6002. continue;
  6003. }
  6004. /*
* We can't use page_count without pinning the page
* because another CPU can free the compound page.
* This check already skips compound tails of THP
* because their page->_refcount is zero at all times.
  6009. */
  6010. if (!page_ref_count(page)) {
  6011. if (PageBuddy(page))
  6012. iter += (1 << page_order(page)) - 1;
  6013. continue;
  6014. }
  6015. /*
* The HWPoisoned page may not be in the buddy system, and
  6017. * page_count() is not 0.
  6018. */
  6019. if (skip_hwpoisoned_pages && PageHWPoison(page))
  6020. continue;
  6021. if (!PageLRU(page))
  6022. found++;
  6023. /*
* If there are RECLAIMABLE pages, we need to check them.
* But for now, memory offline itself doesn't call
* shrink_node_slabs(); this still needs to be fixed.
  6027. */
  6028. /*
* If the page is not RAM, page_count() should be 0, and
* we don't need any further check. This is a _used_, not-movable page.
*
* The problematic thing here is PG_reserved pages. PG_reserved
* is set on both memory-hole pages and _used_ kernel pages
* at boot.
  6035. */
  6036. if (found > count)
  6037. return true;
  6038. }
  6039. return false;
  6040. }
  6041. bool is_pageblock_removable_nolock(struct page *page)
  6042. {
  6043. struct zone *zone;
  6044. unsigned long pfn;
  6045. /*
  6046. * We have to be careful here because we are iterating over memory
  6047. * sections which are not zone aware so we might end up outside of
  6048. * the zone but still within the section.
  6049. * We have to take care about the node as well. If the node is offline
  6050. * its NODE_DATA will be NULL - see page_zone.
  6051. */
  6052. if (!node_online(page_to_nid(page)))
  6053. return false;
  6054. zone = page_zone(page);
  6055. pfn = page_to_pfn(page);
  6056. if (!zone_spans_pfn(zone, pfn))
  6057. return false;
  6058. return !has_unmovable_pages(zone, page, 0, true);
  6059. }
  6060. #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
  6061. static unsigned long pfn_max_align_down(unsigned long pfn)
  6062. {
  6063. return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
  6064. pageblock_nr_pages) - 1);
  6065. }
  6066. static unsigned long pfn_max_align_up(unsigned long pfn)
  6067. {
  6068. return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
  6069. pageblock_nr_pages));
  6070. }
  6071. /* [start, end) must belong to a single zone. */
  6072. static int __alloc_contig_migrate_range(struct compact_control *cc,
  6073. unsigned long start, unsigned long end)
  6074. {
  6075. /* This function is based on compact_zone() from compaction.c. */
  6076. unsigned long nr_reclaimed;
  6077. unsigned long pfn = start;
  6078. unsigned int tries = 0;
  6079. int ret = 0;
  6080. migrate_prep();
  6081. while (pfn < end || !list_empty(&cc->migratepages)) {
  6082. if (fatal_signal_pending(current)) {
  6083. ret = -EINTR;
  6084. break;
  6085. }
  6086. if (list_empty(&cc->migratepages)) {
  6087. cc->nr_migratepages = 0;
  6088. pfn = isolate_migratepages_range(cc, pfn, end);
  6089. if (!pfn) {
  6090. ret = -EINTR;
  6091. break;
  6092. }
  6093. tries = 0;
  6094. } else if (++tries == 5) {
  6095. ret = ret < 0 ? ret : -EBUSY;
  6096. break;
  6097. }
  6098. nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
  6099. &cc->migratepages);
  6100. cc->nr_migratepages -= nr_reclaimed;
  6101. ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
  6102. NULL, 0, cc->mode, MR_CMA);
  6103. }
  6104. if (ret < 0) {
  6105. putback_movable_pages(&cc->migratepages);
  6106. return ret;
  6107. }
  6108. return 0;
  6109. }
  6110. /**
  6111. * alloc_contig_range() -- tries to allocate given range of pages
  6112. * @start: start PFN to allocate
  6113. * @end: one-past-the-last PFN to allocate
* @migratetype: migratetype of the underlying pageblocks (either
  6115. * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
  6116. * in range must have the same migratetype and it must
  6117. * be either of the two.
  6118. *
  6119. * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
  6120. * aligned, however it's the caller's responsibility to guarantee that
  6121. * we are the only thread that changes migrate type of pageblocks the
  6122. * pages fall in.
  6123. *
  6124. * The PFN range must belong to a single zone.
  6125. *
  6126. * Returns zero on success or negative error code. On success all
* pages whose PFN is in [start, end) are allocated for the caller and
  6128. * need to be freed with free_contig_range().
  6129. */
  6130. int alloc_contig_range(unsigned long start, unsigned long end,
  6131. unsigned migratetype)
  6132. {
  6133. unsigned long outer_start, outer_end;
  6134. unsigned int order;
  6135. int ret = 0;
  6136. struct compact_control cc = {
  6137. .nr_migratepages = 0,
  6138. .order = -1,
  6139. .zone = page_zone(pfn_to_page(start)),
  6140. .mode = MIGRATE_SYNC,
  6141. .ignore_skip_hint = true,
  6142. };
  6143. INIT_LIST_HEAD(&cc.migratepages);
  6144. /*
  6145. * What we do here is we mark all pageblocks in range as
* MIGRATE_ISOLATE. Because pageblocks and max-order pages may
* have different sizes, and due to the way the page allocator
* works, we align the range to the bigger of the two so
* that the page allocator won't try to merge buddies from
  6150. * different pageblocks and change MIGRATE_ISOLATE to some
  6151. * other migration type.
  6152. *
  6153. * Once the pageblocks are marked as MIGRATE_ISOLATE, we
  6154. * migrate the pages from an unaligned range (ie. pages that
  6155. * we are interested in). This will put all the pages in
  6156. * range back to page allocator as MIGRATE_ISOLATE.
  6157. *
  6158. * When this is done, we take the pages in range from page
  6159. * allocator removing them from the buddy system. This way
  6160. * page allocator will never consider using them.
  6161. *
  6162. * This lets us mark the pageblocks back as
  6163. * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
  6164. * aligned range but not in the unaligned, original range are
  6165. * put back to page allocator so that buddy can use them.
  6166. */
  6167. ret = start_isolate_page_range(pfn_max_align_down(start),
  6168. pfn_max_align_up(end), migratetype,
  6169. false);
  6170. if (ret)
  6171. return ret;
  6172. /*
  6173. * In case of -EBUSY, we'd like to know which page causes problem.
  6174. * So, just fall through. We will check it in test_pages_isolated().
  6175. */
  6176. ret = __alloc_contig_migrate_range(&cc, start, end);
  6177. if (ret && ret != -EBUSY)
  6178. goto done;
  6179. /*
  6180. * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
  6181. * aligned blocks that are marked as MIGRATE_ISOLATE. What's
  6182. * more, all pages in [start, end) are free in page allocator.
  6183. * What we are going to do is to allocate all pages from
  6184. * [start, end) (that is remove them from page allocator).
  6185. *
  6186. * The only problem is that pages at the beginning and at the
* end of the interesting range may not be aligned with pages that
* the page allocator holds, i.e. they can be part of higher order
  6189. * pages. Because of this, we reserve the bigger range and
  6190. * once this is done free the pages we are not interested in.
  6191. *
  6192. * We don't have to hold zone->lock here because the pages are
  6193. * isolated thus they won't get removed from buddy.
  6194. */
  6195. lru_add_drain_all();
  6196. drain_all_pages(cc.zone);
  6197. order = 0;
  6198. outer_start = start;
  6199. while (!PageBuddy(pfn_to_page(outer_start))) {
  6200. if (++order >= MAX_ORDER) {
  6201. outer_start = start;
  6202. break;
  6203. }
  6204. outer_start &= ~0UL << order;
  6205. }
  6206. if (outer_start != start) {
  6207. order = page_order(pfn_to_page(outer_start));
  6208. /*
* The outer_start page could be a small-order buddy page that
* doesn't include the start page. Adjust outer_start
  6211. * in this case to report failed page properly
  6212. * on tracepoint in test_pages_isolated()
  6213. */
  6214. if (outer_start + (1UL << order) <= start)
  6215. outer_start = start;
  6216. }
  6217. /* Make sure the range is really isolated. */
  6218. if (test_pages_isolated(outer_start, end, false)) {
  6219. pr_info("%s: [%lx, %lx) PFNs busy\n",
  6220. __func__, outer_start, end);
  6221. ret = -EBUSY;
  6222. goto done;
  6223. }
  6224. /* Grab isolated pages from freelists. */
  6225. outer_end = isolate_freepages_range(&cc, outer_start, end);
  6226. if (!outer_end) {
  6227. ret = -EBUSY;
  6228. goto done;
  6229. }
  6230. /* Free head and tail (if any) */
  6231. if (start != outer_start)
  6232. free_contig_range(outer_start, start - outer_start);
  6233. if (end != outer_end)
  6234. free_contig_range(end, outer_end - end);
  6235. done:
  6236. undo_isolate_page_range(pfn_max_align_down(start),
  6237. pfn_max_align_up(end), migratetype);
  6238. return ret;
  6239. }
  6240. void free_contig_range(unsigned long pfn, unsigned nr_pages)
  6241. {
  6242. unsigned int count = 0;
  6243. for (; nr_pages--; pfn++) {
  6244. struct page *page = pfn_to_page(pfn);
  6245. count += page_count(page) != 1;
  6246. __free_page(page);
  6247. }
  6248. WARN(count != 0, "%d pages are still in use!\n", count);
  6249. }
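/*
 * Usage sketch (illustrative): CMA-style users allocate and release a
 * physically contiguous region with
 *
 *	ret = alloc_contig_range(pfn, pfn + nr_pages, MIGRATE_CMA);
 *	...
 *	free_contig_range(pfn, nr_pages);
 *
 * where all pageblocks in [pfn, pfn + nr_pages) were marked MIGRATE_CMA
 * beforehand, e.g. by the CMA reservation code.
 */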
  6250. #endif
  6251. #ifdef CONFIG_MEMORY_HOTPLUG
  6252. /*
  6253. * The zone indicated has a new number of managed_pages; batch sizes and percpu
* page high values need to be recalculated.
  6255. */
  6256. void __meminit zone_pcp_update(struct zone *zone)
  6257. {
  6258. unsigned cpu;
  6259. mutex_lock(&pcp_batch_high_lock);
  6260. for_each_possible_cpu(cpu)
  6261. pageset_set_high_and_batch(zone,
  6262. per_cpu_ptr(zone->pageset, cpu));
  6263. mutex_unlock(&pcp_batch_high_lock);
  6264. }
  6265. #endif
  6266. void zone_pcp_reset(struct zone *zone)
  6267. {
  6268. unsigned long flags;
  6269. int cpu;
  6270. struct per_cpu_pageset *pset;
  6271. /* avoid races with drain_pages() */
  6272. local_irq_save(flags);
  6273. if (zone->pageset != &boot_pageset) {
  6274. for_each_online_cpu(cpu) {
  6275. pset = per_cpu_ptr(zone->pageset, cpu);
  6276. drain_zonestat(zone, pset);
  6277. }
  6278. free_percpu(zone->pageset);
  6279. zone->pageset = &boot_pageset;
  6280. }
  6281. local_irq_restore(flags);
  6282. }
  6283. #ifdef CONFIG_MEMORY_HOTREMOVE
  6284. /*
  6285. * All pages in the range must be in a single zone and isolated
  6286. * before calling this.
  6287. */
  6288. void
  6289. __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
  6290. {
  6291. struct page *page;
  6292. struct zone *zone;
  6293. unsigned int order, i;
  6294. unsigned long pfn;
  6295. unsigned long flags;
  6296. /* find the first valid pfn */
  6297. for (pfn = start_pfn; pfn < end_pfn; pfn++)
  6298. if (pfn_valid(pfn))
  6299. break;
  6300. if (pfn == end_pfn)
  6301. return;
  6302. zone = page_zone(pfn_to_page(pfn));
  6303. spin_lock_irqsave(&zone->lock, flags);
  6304. pfn = start_pfn;
  6305. while (pfn < end_pfn) {
  6306. if (!pfn_valid(pfn)) {
  6307. pfn++;
  6308. continue;
  6309. }
  6310. page = pfn_to_page(pfn);
  6311. /*
* The HWPoisoned page may not be in the buddy system, and
  6313. * page_count() is not 0.
  6314. */
  6315. if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
  6316. pfn++;
  6317. SetPageReserved(page);
  6318. continue;
  6319. }
  6320. BUG_ON(page_count(page));
  6321. BUG_ON(!PageBuddy(page));
  6322. order = page_order(page);
  6323. #ifdef CONFIG_DEBUG_VM
  6324. pr_info("remove from free list %lx %d %lx\n",
  6325. pfn, 1 << order, end_pfn);
  6326. #endif
  6327. list_del(&page->lru);
  6328. rmv_page_order(page);
  6329. zone->free_area[order].nr_free--;
  6330. for (i = 0; i < (1 << order); i++)
  6331. SetPageReserved((page+i));
  6332. pfn += (1 << order);
  6333. }
  6334. spin_unlock_irqrestore(&zone->lock, flags);
  6335. }
  6336. #endif
  6337. bool is_free_buddy_page(struct page *page)
  6338. {
  6339. struct zone *zone = page_zone(page);
  6340. unsigned long pfn = page_to_pfn(page);
  6341. unsigned long flags;
  6342. unsigned int order;
  6343. spin_lock_irqsave(&zone->lock, flags);
  6344. for (order = 0; order < MAX_ORDER; order++) {
  6345. struct page *page_head = page - (pfn & ((1 << order) - 1));
  6346. if (PageBuddy(page_head) && page_order(page_head) >= order)
  6347. break;
  6348. }
  6349. spin_unlock_irqrestore(&zone->lock, flags);
  6350. return order < MAX_ORDER;
  6351. }