/*
 * linux/mm/page_alloc.c
 *
 * Manages the free list, the system allocates free pages here.
 * Note that kmalloc() lives in slab.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *	(lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"
/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/* work_structs for global per-cpu drains */
DEFINE_MUTEX(pcpu_drain_mutex);
DEFINE_PER_CPU(struct work_struct, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif /* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}
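
/*
 * Note on the accessors above: reusing page->index as the cache works because
 * a page sitting on a pcplist has been freed and is not attached to any
 * address_space, so its index field carries no other meaning at that point.
 */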
#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif
static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	256,
#endif
#ifdef CONFIG_ZONE_DMA32
	256,
#endif
#ifdef CONFIG_HIGHMEM
	32,
#endif
	32,
};
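
/*
 * Worked example for the table above: the ratios are divisors, not byte
 * counts. On the 1G machine, a NORMAL allocation holds back roughly
 * 784M / 256 ~= 3M of ZONE_DMA, while a HIGHMEM allocation holds back
 * 224M / 32 = 7M of ZONE_NORMAL and (224M + 784M) / 256 ~= 4M of ZONE_DMA.
 * A smaller ratio therefore reserves more low memory.
 */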
EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	"DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	"DMA32",
#endif
	"Normal",
#ifdef CONFIG_HIGHMEM
	"HighMem",
#endif
	"Movable",
#ifdef CONFIG_ZONE_DEVICE
	"Device",
#endif
};

char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
	NULL,
	free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
static bool mirrored_kernelcore;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT

/*
 * Determine how many pages need to be initialized during early boot
 * (non-deferred initialization).
 * The value of first_deferred_pfn will be set later, once non-deferred pages
 * are initialized, but for now set it to ULONG_MAX.
 */
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
	phys_addr_t start_addr, end_addr;
	unsigned long max_pgcnt;
	unsigned long reserved;

	/*
	 * Initialise at least 2G of a node, but also take into account
	 * two large system hashes that can take up to 1GB for 0.25TB/node.
	 */
	max_pgcnt = max(2UL << (30 - PAGE_SHIFT),
			(pgdat->node_spanned_pages >> 8));

	/*
	 * Compensate for all the memblock reservations (e.g. crash kernel)
	 * from the initial estimation to make sure we will initialize enough
	 * memory to boot.
	 */
	start_addr = PFN_PHYS(pgdat->node_start_pfn);
	end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt);
	reserved = memblock_reserved_memory_within(start_addr, end_addr);
	max_pgcnt += PHYS_PFN(reserved);

	pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages);
	pgdat->first_deferred_pfn = ULONG_MAX;
}
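
/*
 * Worked example for the sizing above, assuming 4K pages (PAGE_SHIFT == 12):
 * the floor is 2UL << (30 - 12) = 524288 pages, i.e. 2GB per node. A node
 * spanning 1TB (~268M pages) instead uses node_spanned_pages >> 8, roughly
 * 1/256th of the node (~4GB), before memblock reservations are added back.
 */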
/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	/* Always populate low zones for address-constrained allocations */
	if (zone_end < pgdat_end_pfn(pgdat))
		return true;
	(*nr_initialised)++;
	if ((*nr_initialised > pgdat->static_init_pgcnt) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		pgdat->first_deferred_pfn = pfn;
		return false;
	}

	return true;
}
#else
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
}

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
				unsigned long pfn, unsigned long zone_end,
				unsigned long *nr_initialised)
{
	return true;
}
#endif
/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return __pfn_to_section(pfn)->pageblock_flags;
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	bitidx += end_bitidx;
	return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}
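
/*
 * Worked example of the bit arithmetic above, assuming 64-bit longs and the
 * 4 bits per pageblock asserted below (NR_PAGEBLOCK_BITS): for the pageblock
 * at index 16 within its section, pfn_to_bitidx() returns 16 * 4 = 64, so
 * word_bitidx = 1 and the in-word bitidx becomes 0. Reading the migratetype
 * (end_bitidx = PB_migrate_end = 2, mask = MIGRATETYPE_MASK = 0x7, both from
 * pageblock-flags.h) then evaluates to (bitmap[1] >> 61) & 0x7, i.e. the
 * flags are packed MSB-first within each bitmap word.
 */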
/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long end_bitidx,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	bitidx += end_bitidx;
	mask <<= (BITS_PER_LONG - bitidx - 1);
	flags <<= (BITS_PER_LONG - bitidx - 1);

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}
void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert("BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	__dump_page(page, reason);
	bad_flags &= page->flags;
	if (bad_flags)
		pr_alert("bad because of flags: %#lx(%pGp)\n",
			bad_flags, &bad_flags);
	dump_page_owner(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}
/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits is a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset in the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->mapping = TAIL_MAPPING;
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}
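
/*
 * For example, prep_compound_page(page, 2) turns a block of four pages into a
 * compound page: page[0] becomes the head (PG_head set, order 2, destructor
 * index COMPOUND_PAGE_DTOR), while page[1]..page[3] become tail pages with a
 * zero refcount, mapping set to TAIL_MAPPING, and compound_head pointing back
 * at page[0].
 */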
#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled);
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
	if (!buf)
		return -EINVAL;
	return kstrtobool(buf, &_debug_pagealloc_enabled);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
	/* If we don't use debug_pagealloc, we don't need guard page */
	if (!debug_pagealloc_enabled())
		return false;

	if (!debug_guardpage_minorder())
		return false;

	return true;
}

static void init_debug_guardpage(void)
{
	if (!debug_pagealloc_enabled())
		return;

	if (!debug_guardpage_minorder())
		return;

	_debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
	.need = need_debug_guardpage,
	.init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return false;

	__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	struct page_ext *page_ext;

	if (!debug_guardpage_enabled())
		return;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops;
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) {}
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}
/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * to PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording the page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (page_is_guard(buddy) && page_order(buddy) == order) {
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		return 1;
	}

	return 0;
}
  678. /*
  679. * Freeing function for a buddy system allocator.
  680. *
  681. * The concept of a buddy system is to maintain direct-mapped table
  682. * (containing bit values) for memory blocks of various "orders".
  683. * The bottom level table contains the map for the smallest allocatable
  684. * units of memory (here, pages), and each level above it describes
  685. * pairs of units from the levels below, hence, "buddies".
  686. * At a high level, all that happens here is marking the table entry
  687. * at the bottom level available, and propagating the changes upward
  688. * as necessary, plus some accounting needed to play nicely with other
  689. * parts of the VM system.
  690. * At each level, we keep a list of pages, which are heads of continuous
  691. * free pages of length of (1 << order) and marked with _mapcount
  692. * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
  693. * field.
  694. * So when we are allocating or freeing one, we can derive the state of the
  695. * other. That is, if we allocate a small block, and both were
  696. * free, the remainder of the region must be split into blocks.
  697. * If a block is freed, and its buddy is also free, then this
  698. * triggers coalescing into a block of larger size.
  699. *
  700. * -- nyc
  701. */
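/*
 * For illustration (assuming __find_buddy_pfn() computes the usual
 * pfn ^ (1 << order)), freeing an order-0 page at pfn 12 merges as:
 *
 *   order 0: buddy_pfn = 12 ^ 1 = 13, combined_pfn = 13 & 12 = 12
 *   order 1: buddy_pfn = 12 ^ 2 = 14, combined_pfn = 14 & 12 = 12
 *   order 2: buddy_pfn = 12 ^ 4 =  8, combined_pfn =  8 & 12 =  8
 *
 * Merging stops at the first order whose buddy fails page_is_buddy(),
 * and the combined block is then placed on that order's free list.
 */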
  702. static inline void __free_one_page(struct page *page,
  703. unsigned long pfn,
  704. struct zone *zone, unsigned int order,
  705. int migratetype)
  706. {
  707. unsigned long combined_pfn;
  708. unsigned long uninitialized_var(buddy_pfn);
  709. struct page *buddy;
  710. unsigned int max_order;
  711. max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
  712. VM_BUG_ON(!zone_is_initialized(zone));
  713. VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
  714. VM_BUG_ON(migratetype == -1);
  715. if (likely(!is_migrate_isolate(migratetype)))
  716. __mod_zone_freepage_state(zone, 1 << order, migratetype);
  717. VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
  718. VM_BUG_ON_PAGE(bad_range(zone, page), page);
  719. continue_merging:
  720. while (order < max_order - 1) {
  721. buddy_pfn = __find_buddy_pfn(pfn, order);
  722. buddy = page + (buddy_pfn - pfn);
  723. if (!pfn_valid_within(buddy_pfn))
  724. goto done_merging;
  725. if (!page_is_buddy(page, buddy, order))
  726. goto done_merging;
  727. /*
728. * Our buddy is free or it is a CONFIG_DEBUG_PAGEALLOC guard page,
  729. * merge with it and move up one order.
  730. */
  731. if (page_is_guard(buddy)) {
  732. clear_page_guard(zone, buddy, order, migratetype);
  733. } else {
  734. list_del(&buddy->lru);
  735. zone->free_area[order].nr_free--;
  736. rmv_page_order(buddy);
  737. }
  738. combined_pfn = buddy_pfn & pfn;
  739. page = page + (combined_pfn - pfn);
  740. pfn = combined_pfn;
  741. order++;
  742. }
  743. if (max_order < MAX_ORDER) {
  744. /* If we are here, it means order is >= pageblock_order.
745. * We want to prevent merging between freepages on an isolated
746. * pageblock and a normal pageblock. Without this, pageblock
  747. * isolation could cause incorrect freepage or CMA accounting.
  748. *
  749. * We don't want to hit this code for the more frequent
  750. * low-order merging.
  751. */
  752. if (unlikely(has_isolate_pageblock(zone))) {
  753. int buddy_mt;
  754. buddy_pfn = __find_buddy_pfn(pfn, order);
  755. buddy = page + (buddy_pfn - pfn);
  756. buddy_mt = get_pageblock_migratetype(buddy);
  757. if (migratetype != buddy_mt
  758. && (is_migrate_isolate(migratetype) ||
  759. is_migrate_isolate(buddy_mt)))
  760. goto done_merging;
  761. }
  762. max_order++;
  763. goto continue_merging;
  764. }
  765. done_merging:
  766. set_page_order(page, order);
  767. /*
  768. * If this is not the largest possible page, check if the buddy
  769. * of the next-highest order is free. If it is, it's possible
770. * that pages are being freed that will coalesce soon. In case
771. * that is happening, add the free page to the tail of the list
772. * so it's less likely to be used soon and more likely to be merged
773. * as a higher-order page.
  774. */
  775. if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
  776. struct page *higher_page, *higher_buddy;
  777. combined_pfn = buddy_pfn & pfn;
  778. higher_page = page + (combined_pfn - pfn);
  779. buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
  780. higher_buddy = higher_page + (buddy_pfn - combined_pfn);
  781. if (pfn_valid_within(buddy_pfn) &&
  782. page_is_buddy(higher_page, higher_buddy, order + 1)) {
  783. list_add_tail(&page->lru,
  784. &zone->free_area[order].free_list[migratetype]);
  785. goto out;
  786. }
  787. }
  788. list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
  789. out:
  790. zone->free_area[order].nr_free++;
  791. }
  792. /*
  793. * A bad page could be due to a number of fields. Instead of multiple branches,
  794. * try and check multiple fields with one check. The caller must do a detailed
  795. * check if necessary.
  796. */
  797. static inline bool page_expected_state(struct page *page,
  798. unsigned long check_flags)
  799. {
  800. if (unlikely(atomic_read(&page->_mapcount) != -1))
  801. return false;
  802. if (unlikely((unsigned long)page->mapping |
  803. page_ref_count(page) |
  804. #ifdef CONFIG_MEMCG
  805. (unsigned long)page->mem_cgroup |
  806. #endif
  807. (page->flags & check_flags)))
  808. return false;
  809. return true;
  810. }
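/*
 * For example, a stray reference (page_ref_count() != 0) or a non-NULL
 * ->mapping makes the ORed value above nonzero, so a single unlikely()
 * branch rejects the page; the caller then does a detailed check (e.g.
 * free_pages_check_bad()) to report which field was actually bad.
 */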
  811. static void free_pages_check_bad(struct page *page)
  812. {
  813. const char *bad_reason;
  814. unsigned long bad_flags;
  815. bad_reason = NULL;
  816. bad_flags = 0;
  817. if (unlikely(atomic_read(&page->_mapcount) != -1))
  818. bad_reason = "nonzero mapcount";
  819. if (unlikely(page->mapping != NULL))
  820. bad_reason = "non-NULL mapping";
  821. if (unlikely(page_ref_count(page) != 0))
  822. bad_reason = "nonzero _refcount";
  823. if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
  824. bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
  825. bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
  826. }
  827. #ifdef CONFIG_MEMCG
  828. if (unlikely(page->mem_cgroup))
  829. bad_reason = "page still charged to cgroup";
  830. #endif
  831. bad_page(page, bad_reason, bad_flags);
  832. }
  833. static inline int free_pages_check(struct page *page)
  834. {
  835. if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
  836. return 0;
  837. /* Something has gone sideways, find it */
  838. free_pages_check_bad(page);
  839. return 1;
  840. }
  841. static int free_tail_pages_check(struct page *head_page, struct page *page)
  842. {
  843. int ret = 1;
  844. /*
845. * We rely on page->lru.next never having bit 0 set, unless the page
  846. * is PageTail(). Let's make sure that's true even for poisoned ->lru.
  847. */
  848. BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
  849. if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
  850. ret = 0;
  851. goto out;
  852. }
  853. switch (page - head_page) {
  854. case 1:
  855. /* the first tail page: ->mapping is compound_mapcount() */
  856. if (unlikely(compound_mapcount(page))) {
  857. bad_page(page, "nonzero compound_mapcount", 0);
  858. goto out;
  859. }
  860. break;
  861. case 2:
  862. /*
  863. * the second tail page: ->mapping is
  864. * page_deferred_list().next -- ignore value.
  865. */
  866. break;
  867. default:
  868. if (page->mapping != TAIL_MAPPING) {
  869. bad_page(page, "corrupted mapping in tail page", 0);
  870. goto out;
  871. }
  872. break;
  873. }
  874. if (unlikely(!PageTail(page))) {
  875. bad_page(page, "PageTail not set", 0);
  876. goto out;
  877. }
  878. if (unlikely(compound_head(page) != head_page)) {
  879. bad_page(page, "compound_head not consistent", 0);
  880. goto out;
  881. }
  882. ret = 0;
  883. out:
  884. page->mapping = NULL;
  885. clear_compound_head(page);
  886. return ret;
  887. }
  888. static __always_inline bool free_pages_prepare(struct page *page,
  889. unsigned int order, bool check_free)
  890. {
  891. int bad = 0;
  892. VM_BUG_ON_PAGE(PageTail(page), page);
  893. trace_mm_page_free(page, order);
  894. /*
  895. * Check tail pages before head page information is cleared to
  896. * avoid checking PageCompound for order-0 pages.
  897. */
  898. if (unlikely(order)) {
  899. bool compound = PageCompound(page);
  900. int i;
  901. VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
  902. if (compound)
  903. ClearPageDoubleMap(page);
  904. for (i = 1; i < (1 << order); i++) {
  905. if (compound)
  906. bad += free_tail_pages_check(page, page + i);
  907. if (unlikely(free_pages_check(page + i))) {
  908. bad++;
  909. continue;
  910. }
  911. (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
  912. }
  913. }
  914. if (PageMappingFlags(page))
  915. page->mapping = NULL;
  916. if (memcg_kmem_enabled() && PageKmemcg(page))
  917. memcg_kmem_uncharge(page, order);
  918. if (check_free)
  919. bad += free_pages_check(page);
  920. if (bad)
  921. return false;
  922. page_cpupid_reset_last(page);
  923. page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
  924. reset_page_owner(page, order);
  925. if (!PageHighMem(page)) {
  926. debug_check_no_locks_freed(page_address(page),
  927. PAGE_SIZE << order);
  928. debug_check_no_obj_freed(page_address(page),
  929. PAGE_SIZE << order);
  930. }
  931. arch_free_page(page, order);
  932. kernel_poison_pages(page, 1 << order, 0);
  933. kernel_map_pages(page, 1 << order, 0);
  934. kasan_free_pages(page, order);
  935. return true;
  936. }
  937. #ifdef CONFIG_DEBUG_VM
  938. static inline bool free_pcp_prepare(struct page *page)
  939. {
  940. return free_pages_prepare(page, 0, true);
  941. }
  942. static inline bool bulkfree_pcp_prepare(struct page *page)
  943. {
  944. return false;
  945. }
  946. #else
  947. static bool free_pcp_prepare(struct page *page)
  948. {
  949. return free_pages_prepare(page, 0, false);
  950. }
  951. static bool bulkfree_pcp_prepare(struct page *page)
  952. {
  953. return free_pages_check(page);
  954. }
  955. #endif /* CONFIG_DEBUG_VM */
  956. /*
957. * Frees a number of pages from the PCP lists.
958. * Assumes all pages on the list are in the same zone, and of the same order.
  959. * count is the number of pages to free.
  960. *
  961. * If the zone was previously in an "all pages pinned" state then look to
  962. * see if this freeing clears that state.
  963. *
  964. * And clear the zone's pages_scanned counter, to hold off the "all pages are
  965. * pinned" detection logic.
  966. */
  967. static void free_pcppages_bulk(struct zone *zone, int count,
  968. struct per_cpu_pages *pcp)
  969. {
  970. int migratetype = 0;
  971. int batch_free = 0;
  972. bool isolated_pageblocks;
  973. spin_lock(&zone->lock);
  974. isolated_pageblocks = has_isolate_pageblock(zone);
  975. while (count) {
  976. struct page *page;
  977. struct list_head *list;
  978. /*
  979. * Remove pages from lists in a round-robin fashion. A
  980. * batch_free count is maintained that is incremented when an
  981. * empty list is encountered. This is so more pages are freed
  982. * off fuller lists instead of spinning excessively around empty
  983. * lists
  984. */
  985. do {
  986. batch_free++;
  987. if (++migratetype == MIGRATE_PCPTYPES)
  988. migratetype = 0;
  989. list = &pcp->lists[migratetype];
  990. } while (list_empty(list));
  991. /* This is the only non-empty list. Free them all. */
  992. if (batch_free == MIGRATE_PCPTYPES)
  993. batch_free = count;
  994. do {
  995. int mt; /* migratetype of the to-be-freed page */
  996. page = list_last_entry(list, struct page, lru);
  997. /* must delete as __free_one_page list manipulates */
  998. list_del(&page->lru);
  999. mt = get_pcppage_migratetype(page);
  1000. /* MIGRATE_ISOLATE page should not go to pcplists */
  1001. VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
  1002. /* Pageblock could have been isolated meanwhile */
  1003. if (unlikely(isolated_pageblocks))
  1004. mt = get_pageblock_migratetype(page);
  1005. if (bulkfree_pcp_prepare(page))
  1006. continue;
  1007. __free_one_page(page, page_to_pfn(page), zone, 0, mt);
  1008. trace_mm_page_pcpu_drain(page, 0, mt);
  1009. } while (--count && --batch_free && !list_empty(list));
  1010. }
  1011. spin_unlock(&zone->lock);
  1012. }
  1013. static void free_one_page(struct zone *zone,
  1014. struct page *page, unsigned long pfn,
  1015. unsigned int order,
  1016. int migratetype)
  1017. {
  1018. spin_lock(&zone->lock);
  1019. if (unlikely(has_isolate_pageblock(zone) ||
  1020. is_migrate_isolate(migratetype))) {
  1021. migratetype = get_pfnblock_migratetype(page, pfn);
  1022. }
  1023. __free_one_page(page, pfn, zone, order, migratetype);
  1024. spin_unlock(&zone->lock);
  1025. }
  1026. static void __meminit __init_single_page(struct page *page, unsigned long pfn,
  1027. unsigned long zone, int nid)
  1028. {
  1029. mm_zero_struct_page(page);
  1030. set_page_links(page, zone, nid, pfn);
  1031. init_page_count(page);
  1032. page_mapcount_reset(page);
  1033. page_cpupid_reset_last(page);
  1034. INIT_LIST_HEAD(&page->lru);
  1035. #ifdef WANT_PAGE_VIRTUAL
  1036. /* The shift won't overflow because ZONE_NORMAL is below 4G. */
  1037. if (!is_highmem_idx(zone))
  1038. set_page_address(page, __va(pfn << PAGE_SHIFT));
  1039. #endif
  1040. }
  1041. static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
  1042. int nid)
  1043. {
  1044. return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
  1045. }
  1046. #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
  1047. static void __meminit init_reserved_page(unsigned long pfn)
  1048. {
  1049. pg_data_t *pgdat;
  1050. int nid, zid;
  1051. if (!early_page_uninitialised(pfn))
  1052. return;
  1053. nid = early_pfn_to_nid(pfn);
  1054. pgdat = NODE_DATA(nid);
  1055. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  1056. struct zone *zone = &pgdat->node_zones[zid];
  1057. if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
  1058. break;
  1059. }
  1060. __init_single_pfn(pfn, zid, nid);
  1061. }
  1062. #else
  1063. static inline void init_reserved_page(unsigned long pfn)
  1064. {
  1065. }
  1066. #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
  1067. /*
  1068. * Initialised pages do not have PageReserved set. This function is
  1069. * called for each range allocated by the bootmem allocator and
  1070. * marks the pages PageReserved. The remaining valid pages are later
  1071. * sent to the buddy page allocator.
  1072. */
  1073. void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
  1074. {
  1075. unsigned long start_pfn = PFN_DOWN(start);
  1076. unsigned long end_pfn = PFN_UP(end);
  1077. for (; start_pfn < end_pfn; start_pfn++) {
  1078. if (pfn_valid(start_pfn)) {
  1079. struct page *page = pfn_to_page(start_pfn);
  1080. init_reserved_page(start_pfn);
  1081. /* Avoid false-positive PageTail() */
  1082. INIT_LIST_HEAD(&page->lru);
  1083. SetPageReserved(page);
  1084. }
  1085. }
  1086. }
  1087. static void __free_pages_ok(struct page *page, unsigned int order)
  1088. {
  1089. unsigned long flags;
  1090. int migratetype;
  1091. unsigned long pfn = page_to_pfn(page);
  1092. if (!free_pages_prepare(page, order, true))
  1093. return;
  1094. migratetype = get_pfnblock_migratetype(page, pfn);
  1095. local_irq_save(flags);
  1096. __count_vm_events(PGFREE, 1 << order);
  1097. free_one_page(page_zone(page), page, pfn, order, migratetype);
  1098. local_irq_restore(flags);
  1099. }
  1100. static void __init __free_pages_boot_core(struct page *page, unsigned int order)
  1101. {
  1102. unsigned int nr_pages = 1 << order;
  1103. struct page *p = page;
  1104. unsigned int loop;
  1105. prefetchw(p);
  1106. for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
  1107. prefetchw(p + 1);
  1108. __ClearPageReserved(p);
  1109. set_page_count(p, 0);
  1110. }
  1111. __ClearPageReserved(p);
  1112. set_page_count(p, 0);
  1113. page_zone(page)->managed_pages += nr_pages;
  1114. set_page_refcounted(page);
  1115. __free_pages(page, order);
  1116. }
  1117. #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
  1118. defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
  1119. static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
  1120. int __meminit early_pfn_to_nid(unsigned long pfn)
  1121. {
  1122. static DEFINE_SPINLOCK(early_pfn_lock);
  1123. int nid;
  1124. spin_lock(&early_pfn_lock);
  1125. nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
  1126. if (nid < 0)
  1127. nid = first_online_node;
  1128. spin_unlock(&early_pfn_lock);
  1129. return nid;
  1130. }
  1131. #endif
  1132. #ifdef CONFIG_NODES_SPAN_OTHER_NODES
  1133. static inline bool __meminit __maybe_unused
  1134. meminit_pfn_in_nid(unsigned long pfn, int node,
  1135. struct mminit_pfnnid_cache *state)
  1136. {
  1137. int nid;
  1138. nid = __early_pfn_to_nid(pfn, state);
  1139. if (nid >= 0 && nid != node)
  1140. return false;
  1141. return true;
  1142. }
  1143. /* Only safe to use early in boot when initialisation is single-threaded */
  1144. static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
  1145. {
  1146. return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
  1147. }
  1148. #else
  1149. static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
  1150. {
  1151. return true;
  1152. }
  1153. static inline bool __meminit __maybe_unused
  1154. meminit_pfn_in_nid(unsigned long pfn, int node,
  1155. struct mminit_pfnnid_cache *state)
  1156. {
  1157. return true;
  1158. }
  1159. #endif
  1160. void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
  1161. unsigned int order)
  1162. {
  1163. if (early_page_uninitialised(pfn))
  1164. return;
  1165. return __free_pages_boot_core(page, order);
  1166. }
  1167. /*
  1168. * Check that the whole (or subset of) a pageblock given by the interval of
  1169. * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
1170. * with the migration or free compaction scanner. The scanners then need to
  1171. * use only pfn_valid_within() check for arches that allow holes within
  1172. * pageblocks.
  1173. *
  1174. * Return struct page pointer of start_pfn, or NULL if checks were not passed.
  1175. *
  1176. * It's possible on some configurations to have a setup like node0 node1 node0
1177. * i.e. it's possible that all pages within a zone's range of pages do not
  1178. * belong to a single zone. We assume that a border between node0 and node1
  1179. * can occur within a single pageblock, but not a node0 node1 node0
  1180. * interleaving within a single pageblock. It is therefore sufficient to check
  1181. * the first and last page of a pageblock and avoid checking each individual
  1182. * page in a pageblock.
  1183. */
  1184. struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
  1185. unsigned long end_pfn, struct zone *zone)
  1186. {
  1187. struct page *start_page;
  1188. struct page *end_page;
  1189. /* end_pfn is one past the range we are checking */
  1190. end_pfn--;
  1191. if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
  1192. return NULL;
  1193. start_page = pfn_to_online_page(start_pfn);
  1194. if (!start_page)
  1195. return NULL;
  1196. if (page_zone(start_page) != zone)
  1197. return NULL;
  1198. end_page = pfn_to_page(end_pfn);
  1199. /* This gives a shorter code than deriving page_zone(end_page) */
  1200. if (page_zone_id(start_page) != page_zone_id(end_page))
  1201. return NULL;
  1202. return start_page;
  1203. }
  1204. void set_zone_contiguous(struct zone *zone)
  1205. {
  1206. unsigned long block_start_pfn = zone->zone_start_pfn;
  1207. unsigned long block_end_pfn;
  1208. block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
  1209. for (; block_start_pfn < zone_end_pfn(zone);
  1210. block_start_pfn = block_end_pfn,
  1211. block_end_pfn += pageblock_nr_pages) {
  1212. block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
  1213. if (!__pageblock_pfn_to_page(block_start_pfn,
  1214. block_end_pfn, zone))
  1215. return;
  1216. }
  1217. /* We confirm that there is no hole */
  1218. zone->contiguous = true;
  1219. }
  1220. void clear_zone_contiguous(struct zone *zone)
  1221. {
  1222. zone->contiguous = false;
  1223. }
  1224. #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
  1225. static void __init deferred_free_range(unsigned long pfn,
  1226. unsigned long nr_pages)
  1227. {
  1228. struct page *page;
  1229. unsigned long i;
  1230. if (!nr_pages)
  1231. return;
  1232. page = pfn_to_page(pfn);
  1233. /* Free a large naturally-aligned chunk if possible */
  1234. if (nr_pages == pageblock_nr_pages &&
  1235. (pfn & (pageblock_nr_pages - 1)) == 0) {
  1236. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  1237. __free_pages_boot_core(page, pageblock_order);
  1238. return;
  1239. }
  1240. for (i = 0; i < nr_pages; i++, page++, pfn++) {
  1241. if ((pfn & (pageblock_nr_pages - 1)) == 0)
  1242. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  1243. __free_pages_boot_core(page, 0);
  1244. }
  1245. }
  1246. /* Completion tracking for deferred_init_memmap() threads */
  1247. static atomic_t pgdat_init_n_undone __initdata;
  1248. static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
  1249. static inline void __init pgdat_init_report_one_done(void)
  1250. {
  1251. if (atomic_dec_and_test(&pgdat_init_n_undone))
  1252. complete(&pgdat_init_all_done_comp);
  1253. }
  1254. /*
  1255. * Helper for deferred_init_range, free the given range, reset the counters, and
  1256. * return number of pages freed.
  1257. */
  1258. static inline unsigned long __init __def_free(unsigned long *nr_free,
  1259. unsigned long *free_base_pfn,
  1260. struct page **page)
  1261. {
  1262. unsigned long nr = *nr_free;
  1263. deferred_free_range(*free_base_pfn, nr);
  1264. *free_base_pfn = 0;
  1265. *nr_free = 0;
  1266. *page = NULL;
  1267. return nr;
  1268. }
  1269. static unsigned long __init deferred_init_range(int nid, int zid,
  1270. unsigned long start_pfn,
  1271. unsigned long end_pfn)
  1272. {
  1273. struct mminit_pfnnid_cache nid_init_state = { };
  1274. unsigned long nr_pgmask = pageblock_nr_pages - 1;
  1275. unsigned long free_base_pfn = 0;
  1276. unsigned long nr_pages = 0;
  1277. unsigned long nr_free = 0;
  1278. struct page *page = NULL;
  1279. unsigned long pfn;
  1280. /*
  1281. * First we check if pfn is valid on architectures where it is possible
  1282. * to have holes within pageblock_nr_pages. On systems where it is not
  1283. * possible, this function is optimized out.
  1284. *
  1285. * Then, we check if a current large page is valid by only checking the
  1286. * validity of the head pfn.
  1287. *
  1288. * meminit_pfn_in_nid is checked on systems where pfns can interleave
  1289. * within a node: a pfn is between start and end of a node, but does not
  1290. * belong to this memory node.
  1291. *
  1292. * Finally, we minimize pfn page lookups and scheduler checks by
  1293. * performing it only once every pageblock_nr_pages.
  1294. *
1295. * We do it in two loops: first we initialize struct page, then free to the
1296. * buddy allocator, because while we are freeing pages we can access
  1297. * pages that are ahead (computing buddy page in __free_one_page()).
  1298. */
  1299. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  1300. if (!pfn_valid_within(pfn))
  1301. continue;
  1302. if ((pfn & nr_pgmask) || pfn_valid(pfn)) {
  1303. if (meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
  1304. if (page && (pfn & nr_pgmask))
  1305. page++;
  1306. else
  1307. page = pfn_to_page(pfn);
  1308. __init_single_page(page, pfn, zid, nid);
  1309. cond_resched();
  1310. }
  1311. }
  1312. }
  1313. page = NULL;
  1314. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  1315. if (!pfn_valid_within(pfn)) {
  1316. nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
  1317. } else if (!(pfn & nr_pgmask) && !pfn_valid(pfn)) {
  1318. nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
  1319. } else if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
  1320. nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
  1321. } else if (page && (pfn & nr_pgmask)) {
  1322. page++;
  1323. nr_free++;
  1324. } else {
  1325. nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
  1326. page = pfn_to_page(pfn);
  1327. free_base_pfn = pfn;
  1328. nr_free = 1;
  1329. cond_resched();
  1330. }
  1331. }
  1332. /* Free the last block of pages to allocator */
  1333. nr_pages += __def_free(&nr_free, &free_base_pfn, &page);
  1334. return nr_pages;
  1335. }
  1336. /* Initialise remaining memory on a node */
  1337. static int __init deferred_init_memmap(void *data)
  1338. {
  1339. pg_data_t *pgdat = data;
  1340. int nid = pgdat->node_id;
  1341. unsigned long start = jiffies;
  1342. unsigned long nr_pages = 0;
  1343. unsigned long spfn, epfn;
  1344. phys_addr_t spa, epa;
  1345. int zid;
  1346. struct zone *zone;
  1347. unsigned long first_init_pfn = pgdat->first_deferred_pfn;
  1348. const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
  1349. u64 i;
  1350. if (first_init_pfn == ULONG_MAX) {
  1351. pgdat_init_report_one_done();
  1352. return 0;
  1353. }
  1354. /* Bind memory initialisation thread to a local node if possible */
  1355. if (!cpumask_empty(cpumask))
  1356. set_cpus_allowed_ptr(current, cpumask);
  1357. /* Sanity check boundaries */
  1358. BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
  1359. BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
  1360. pgdat->first_deferred_pfn = ULONG_MAX;
  1361. /* Only the highest zone is deferred so find it */
  1362. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  1363. zone = pgdat->node_zones + zid;
  1364. if (first_init_pfn < zone_end_pfn(zone))
  1365. break;
  1366. }
  1367. first_init_pfn = max(zone->zone_start_pfn, first_init_pfn);
  1368. for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
  1369. spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
  1370. epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
  1371. nr_pages += deferred_init_range(nid, zid, spfn, epfn);
  1372. }
  1373. /* Sanity check that the next zone really is unpopulated */
  1374. WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
  1375. pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
  1376. jiffies_to_msecs(jiffies - start));
  1377. pgdat_init_report_one_done();
  1378. return 0;
  1379. }
  1380. #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
  1381. void __init page_alloc_init_late(void)
  1382. {
  1383. struct zone *zone;
  1384. #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
  1385. int nid;
  1386. /* There will be num_node_state(N_MEMORY) threads */
  1387. atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
  1388. for_each_node_state(nid, N_MEMORY) {
  1389. kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
  1390. }
  1391. /* Block until all are initialised */
  1392. wait_for_completion(&pgdat_init_all_done_comp);
  1393. /* Reinit limits that are based on free pages after the kernel is up */
  1394. files_maxfiles_init();
  1395. #endif
  1396. #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
  1397. /* Discard memblock private memory */
  1398. memblock_discard();
  1399. #endif
  1400. for_each_populated_zone(zone)
  1401. set_zone_contiguous(zone);
  1402. }
  1403. #ifdef CONFIG_CMA
  1404. /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
  1405. void __init init_cma_reserved_pageblock(struct page *page)
  1406. {
  1407. unsigned i = pageblock_nr_pages;
  1408. struct page *p = page;
  1409. do {
  1410. __ClearPageReserved(p);
  1411. set_page_count(p, 0);
  1412. } while (++p, --i);
  1413. set_pageblock_migratetype(page, MIGRATE_CMA);
  1414. if (pageblock_order >= MAX_ORDER) {
  1415. i = pageblock_nr_pages;
  1416. p = page;
  1417. do {
  1418. set_page_refcounted(p);
  1419. __free_pages(p, MAX_ORDER - 1);
  1420. p += MAX_ORDER_NR_PAGES;
  1421. } while (i -= MAX_ORDER_NR_PAGES);
  1422. } else {
  1423. set_page_refcounted(page);
  1424. __free_pages(page, pageblock_order);
  1425. }
  1426. adjust_managed_page_count(page, pageblock_nr_pages);
  1427. }
  1428. #endif
  1429. /*
  1430. * The order of subdivision here is critical for the IO subsystem.
  1431. * Please do not alter this order without good reasons and regression
  1432. * testing. Specifically, as large blocks of memory are subdivided,
  1433. * the order in which smaller blocks are delivered depends on the order
  1434. * they're subdivided in this function. This is the primary factor
  1435. * influencing the order in which pages are delivered to the IO
  1436. * subsystem according to empirical testing, and this is also justified
  1437. * by considering the behavior of a buddy system containing a single
  1438. * large block of memory acted on by a series of small allocations.
  1439. * This behavior is a critical factor in sglist merging's success.
  1440. *
  1441. * -- nyc
  1442. */
  1443. static inline void expand(struct zone *zone, struct page *page,
  1444. int low, int high, struct free_area *area,
  1445. int migratetype)
  1446. {
  1447. unsigned long size = 1 << high;
  1448. while (high > low) {
  1449. area--;
  1450. high--;
  1451. size >>= 1;
  1452. VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
  1453. /*
1454. * Mark the block as guard pages (or a single page) so that it can be
1455. * merged back into the allocator when its buddy is freed.
1456. * The corresponding page table entries will not be touched;
1457. * the pages stay not-present in the virtual address space.
  1458. */
  1459. if (set_page_guard(zone, &page[size], high, migratetype))
  1460. continue;
  1461. list_add(&page[size].lru, &area->free_list[migratetype]);
  1462. area->nr_free++;
  1463. set_page_order(&page[size], high);
  1464. }
  1465. }
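/*
 * Worked example: satisfying an order-0 request from an order-3 block
 * of 8 pages starting at page p, expand(zone, p, 0, 3, area, mt)
 * returns with p[4] on the order-2 free list, p[2] on the order-1
 * list and p[1] on the order-0 list (or marked as guard pages when
 * guard-page debugging is enabled), leaving only p for the caller.
 */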
  1466. static void check_new_page_bad(struct page *page)
  1467. {
  1468. const char *bad_reason = NULL;
  1469. unsigned long bad_flags = 0;
  1470. if (unlikely(atomic_read(&page->_mapcount) != -1))
  1471. bad_reason = "nonzero mapcount";
  1472. if (unlikely(page->mapping != NULL))
  1473. bad_reason = "non-NULL mapping";
  1474. if (unlikely(page_ref_count(page) != 0))
  1475. bad_reason = "nonzero _count";
  1476. if (unlikely(page->flags & __PG_HWPOISON)) {
  1477. bad_reason = "HWPoisoned (hardware-corrupted)";
  1478. bad_flags = __PG_HWPOISON;
  1479. /* Don't complain about hwpoisoned pages */
  1480. page_mapcount_reset(page); /* remove PageBuddy */
  1481. return;
  1482. }
  1483. if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
  1484. bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
  1485. bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
  1486. }
  1487. #ifdef CONFIG_MEMCG
  1488. if (unlikely(page->mem_cgroup))
  1489. bad_reason = "page still charged to cgroup";
  1490. #endif
  1491. bad_page(page, bad_reason, bad_flags);
  1492. }
  1493. /*
  1494. * This page is about to be returned from the page allocator
  1495. */
  1496. static inline int check_new_page(struct page *page)
  1497. {
  1498. if (likely(page_expected_state(page,
  1499. PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
  1500. return 0;
  1501. check_new_page_bad(page);
  1502. return 1;
  1503. }
  1504. static inline bool free_pages_prezeroed(void)
  1505. {
  1506. return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
  1507. page_poisoning_enabled();
  1508. }
  1509. #ifdef CONFIG_DEBUG_VM
  1510. static bool check_pcp_refill(struct page *page)
  1511. {
  1512. return false;
  1513. }
  1514. static bool check_new_pcp(struct page *page)
  1515. {
  1516. return check_new_page(page);
  1517. }
  1518. #else
  1519. static bool check_pcp_refill(struct page *page)
  1520. {
  1521. return check_new_page(page);
  1522. }
  1523. static bool check_new_pcp(struct page *page)
  1524. {
  1525. return false;
  1526. }
  1527. #endif /* CONFIG_DEBUG_VM */
  1528. static bool check_new_pages(struct page *page, unsigned int order)
  1529. {
  1530. int i;
  1531. for (i = 0; i < (1 << order); i++) {
  1532. struct page *p = page + i;
  1533. if (unlikely(check_new_page(p)))
  1534. return true;
  1535. }
  1536. return false;
  1537. }
  1538. inline void post_alloc_hook(struct page *page, unsigned int order,
  1539. gfp_t gfp_flags)
  1540. {
  1541. set_page_private(page, 0);
  1542. set_page_refcounted(page);
  1543. arch_alloc_page(page, order);
  1544. kernel_map_pages(page, 1 << order, 1);
  1545. kernel_poison_pages(page, 1 << order, 1);
  1546. kasan_alloc_pages(page, order);
  1547. set_page_owner(page, order, gfp_flags);
  1548. }
  1549. static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
  1550. unsigned int alloc_flags)
  1551. {
  1552. int i;
  1553. post_alloc_hook(page, order, gfp_flags);
  1554. if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
  1555. for (i = 0; i < (1 << order); i++)
  1556. clear_highpage(page + i);
  1557. if (order && (gfp_flags & __GFP_COMP))
  1558. prep_compound_page(page, order);
  1559. /*
  1560. * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
  1561. * allocate the page. The expectation is that the caller is taking
  1562. * steps that will free more memory. The caller should avoid the page
  1563. * being used for !PFMEMALLOC purposes.
  1564. */
  1565. if (alloc_flags & ALLOC_NO_WATERMARKS)
  1566. set_page_pfmemalloc(page);
  1567. else
  1568. clear_page_pfmemalloc(page);
  1569. }
  1570. /*
  1571. * Go through the free lists for the given migratetype and remove
  1572. * the smallest available page from the freelists
  1573. */
  1574. static __always_inline
  1575. struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  1576. int migratetype)
  1577. {
  1578. unsigned int current_order;
  1579. struct free_area *area;
  1580. struct page *page;
  1581. /* Find a page of the appropriate size in the preferred list */
  1582. for (current_order = order; current_order < MAX_ORDER; ++current_order) {
  1583. area = &(zone->free_area[current_order]);
  1584. page = list_first_entry_or_null(&area->free_list[migratetype],
  1585. struct page, lru);
  1586. if (!page)
  1587. continue;
  1588. list_del(&page->lru);
  1589. rmv_page_order(page);
  1590. area->nr_free--;
  1591. expand(zone, page, order, current_order, area, migratetype);
  1592. set_pcppage_migratetype(page, migratetype);
  1593. return page;
  1594. }
  1595. return NULL;
  1596. }
  1597. /*
1598. * This array describes the order in which free lists are fallen back to
1599. * when the free lists for the desired migratetype are depleted.
  1600. */
  1601. static int fallbacks[MIGRATE_TYPES][4] = {
  1602. [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
  1603. [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
  1604. [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
  1605. #ifdef CONFIG_CMA
  1606. [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
  1607. #endif
  1608. #ifdef CONFIG_MEMORY_ISOLATION
  1609. [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
  1610. #endif
  1611. };
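/*
 * For example, an UNMOVABLE allocation whose own free lists are empty
 * may fall back first to RECLAIMABLE and then to MOVABLE pageblocks;
 * the trailing MIGRATE_TYPES entry terminates the walk in
 * find_suitable_fallback(). The CMA and ISOLATE rows are never walked,
 * as noted above.
 */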
  1612. #ifdef CONFIG_CMA
  1613. static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  1614. unsigned int order)
  1615. {
  1616. return __rmqueue_smallest(zone, order, MIGRATE_CMA);
  1617. }
  1618. #else
  1619. static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  1620. unsigned int order) { return NULL; }
  1621. #endif
  1622. /*
  1623. * Move the free pages in a range to the free lists of the requested type.
1624. * Note that start_page and end_page are not aligned on a pageblock
1625. * boundary. If alignment is required, use move_freepages_block().
  1626. */
  1627. static int move_freepages(struct zone *zone,
  1628. struct page *start_page, struct page *end_page,
  1629. int migratetype, int *num_movable)
  1630. {
  1631. struct page *page;
  1632. unsigned int order;
  1633. int pages_moved = 0;
  1634. #ifndef CONFIG_HOLES_IN_ZONE
  1635. /*
  1636. * page_zone is not safe to call in this context when
  1637. * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
  1638. * anyway as we check zone boundaries in move_freepages_block().
  1639. * Remove at a later date when no bug reports exist related to
  1640. * grouping pages by mobility
  1641. */
  1642. VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
  1643. #endif
  1644. if (num_movable)
  1645. *num_movable = 0;
  1646. for (page = start_page; page <= end_page;) {
  1647. if (!pfn_valid_within(page_to_pfn(page))) {
  1648. page++;
  1649. continue;
  1650. }
  1651. /* Make sure we are not inadvertently changing nodes */
  1652. VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
  1653. if (!PageBuddy(page)) {
  1654. /*
  1655. * We assume that pages that could be isolated for
  1656. * migration are movable. But we don't actually try
  1657. * isolating, as that would be expensive.
  1658. */
  1659. if (num_movable &&
  1660. (PageLRU(page) || __PageMovable(page)))
  1661. (*num_movable)++;
  1662. page++;
  1663. continue;
  1664. }
  1665. order = page_order(page);
  1666. list_move(&page->lru,
  1667. &zone->free_area[order].free_list[migratetype]);
  1668. page += 1 << order;
  1669. pages_moved += 1 << order;
  1670. }
  1671. return pages_moved;
  1672. }
  1673. int move_freepages_block(struct zone *zone, struct page *page,
  1674. int migratetype, int *num_movable)
  1675. {
  1676. unsigned long start_pfn, end_pfn;
  1677. struct page *start_page, *end_page;
  1678. start_pfn = page_to_pfn(page);
  1679. start_pfn = start_pfn & ~(pageblock_nr_pages-1);
  1680. start_page = pfn_to_page(start_pfn);
  1681. end_page = start_page + pageblock_nr_pages - 1;
  1682. end_pfn = start_pfn + pageblock_nr_pages - 1;
  1683. /* Do not cross zone boundaries */
  1684. if (!zone_spans_pfn(zone, start_pfn))
  1685. start_page = page;
  1686. if (!zone_spans_pfn(zone, end_pfn))
  1687. return 0;
  1688. return move_freepages(zone, start_page, end_page, migratetype,
  1689. num_movable);
  1690. }
  1691. static void change_pageblock_range(struct page *pageblock_page,
  1692. int start_order, int migratetype)
  1693. {
  1694. int nr_pageblocks = 1 << (start_order - pageblock_order);
  1695. while (nr_pageblocks--) {
  1696. set_pageblock_migratetype(pageblock_page, migratetype);
  1697. pageblock_page += pageblock_nr_pages;
  1698. }
  1699. }
  1700. /*
  1701. * When we are falling back to another migratetype during allocation, try to
  1702. * steal extra free pages from the same pageblocks to satisfy further
  1703. * allocations, instead of polluting multiple pageblocks.
  1704. *
  1705. * If we are stealing a relatively large buddy page, it is likely there will
  1706. * be more free pages in the pageblock, so try to steal them all. For
  1707. * reclaimable and unmovable allocations, we steal regardless of page size,
  1708. * as fragmentation caused by those allocations polluting movable pageblocks
  1709. * is worse than movable allocations stealing from unmovable and reclaimable
  1710. * pageblocks.
  1711. */
  1712. static bool can_steal_fallback(unsigned int order, int start_mt)
  1713. {
  1714. /*
1715. * This order check is intentionally kept, even though the next
1716. * check uses a more relaxed order check. The reason is that
1717. * we can actually steal the whole pageblock if this condition is met,
1718. * whereas the check below doesn't guarantee it and is just a heuristic,
1719. * so it could be changed at any time.
  1720. */
  1721. if (order >= pageblock_order)
  1722. return true;
  1723. if (order >= pageblock_order / 2 ||
  1724. start_mt == MIGRATE_RECLAIMABLE ||
  1725. start_mt == MIGRATE_UNMOVABLE ||
  1726. page_group_by_mobility_disabled)
  1727. return true;
  1728. return false;
  1729. }
  1730. /*
  1731. * This function implements actual steal behaviour. If order is large enough,
  1732. * we can steal whole pageblock. If not, we first move freepages in this
  1733. * pageblock to our migratetype and determine how many already-allocated pages
  1734. * are there in the pageblock with a compatible migratetype. If at least half
  1735. * of pages are free or compatible, we can change migratetype of the pageblock
  1736. * itself, so pages freed in the future will be put on the correct free list.
  1737. */
  1738. static void steal_suitable_fallback(struct zone *zone, struct page *page,
  1739. int start_type, bool whole_block)
  1740. {
  1741. unsigned int current_order = page_order(page);
  1742. struct free_area *area;
  1743. int free_pages, movable_pages, alike_pages;
  1744. int old_block_type;
  1745. old_block_type = get_pageblock_migratetype(page);
  1746. /*
  1747. * This can happen due to races and we want to prevent broken
  1748. * highatomic accounting.
  1749. */
  1750. if (is_migrate_highatomic(old_block_type))
  1751. goto single_page;
  1752. /* Take ownership for orders >= pageblock_order */
  1753. if (current_order >= pageblock_order) {
  1754. change_pageblock_range(page, current_order, start_type);
  1755. goto single_page;
  1756. }
  1757. /* We are not allowed to try stealing from the whole block */
  1758. if (!whole_block)
  1759. goto single_page;
  1760. free_pages = move_freepages_block(zone, page, start_type,
  1761. &movable_pages);
  1762. /*
  1763. * Determine how many pages are compatible with our allocation.
  1764. * For movable allocation, it's the number of movable pages which
  1765. * we just obtained. For other types it's a bit more tricky.
  1766. */
  1767. if (start_type == MIGRATE_MOVABLE) {
  1768. alike_pages = movable_pages;
  1769. } else {
  1770. /*
  1771. * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
  1772. * to MOVABLE pageblock, consider all non-movable pages as
  1773. * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
  1774. * vice versa, be conservative since we can't distinguish the
  1775. * exact migratetype of non-movable pages.
  1776. */
  1777. if (old_block_type == MIGRATE_MOVABLE)
  1778. alike_pages = pageblock_nr_pages
  1779. - (free_pages + movable_pages);
  1780. else
  1781. alike_pages = 0;
  1782. }
  1783. /* moving whole block can fail due to zone boundary conditions */
  1784. if (!free_pages)
  1785. goto single_page;
  1786. /*
  1787. * If a sufficient number of pages in the block are either free or of
  1788. * comparable migratability as our allocation, claim the whole block.
  1789. */
  1790. if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
  1791. page_group_by_mobility_disabled)
  1792. set_pageblock_migratetype(page, start_type);
  1793. return;
  1794. single_page:
  1795. area = &zone->free_area[current_order];
  1796. list_move(&page->lru, &area->free_list[start_type]);
  1797. }
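/*
 * Worked example, assuming pageblock_order == 9 (512-page pageblocks):
 * after move_freepages_block() the whole block is claimed for
 * start_type whenever free_pages + alike_pages >= 256, i.e. at least
 * half of the pageblock is free or already of a compatible type (or
 * page_group_by_mobility_disabled is set).
 */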
  1798. /*
  1799. * Check whether there is a suitable fallback freepage with requested order.
  1800. * If only_stealable is true, this function returns fallback_mt only if
  1801. * we can steal other freepages all together. This would help to reduce
  1802. * fragmentation due to mixed migratetype pages in one pageblock.
  1803. */
  1804. int find_suitable_fallback(struct free_area *area, unsigned int order,
  1805. int migratetype, bool only_stealable, bool *can_steal)
  1806. {
  1807. int i;
  1808. int fallback_mt;
  1809. if (area->nr_free == 0)
  1810. return -1;
  1811. *can_steal = false;
  1812. for (i = 0;; i++) {
  1813. fallback_mt = fallbacks[migratetype][i];
  1814. if (fallback_mt == MIGRATE_TYPES)
  1815. break;
  1816. if (list_empty(&area->free_list[fallback_mt]))
  1817. continue;
  1818. if (can_steal_fallback(order, migratetype))
  1819. *can_steal = true;
  1820. if (!only_stealable)
  1821. return fallback_mt;
  1822. if (*can_steal)
  1823. return fallback_mt;
  1824. }
  1825. return -1;
  1826. }
  1827. /*
  1828. * Reserve a pageblock for exclusive use of high-order atomic allocations if
1829. * there are no empty pageblocks that contain a page of a suitable order.
  1830. */
  1831. static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
  1832. unsigned int alloc_order)
  1833. {
  1834. int mt;
  1835. unsigned long max_managed, flags;
  1836. /*
  1837. * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
  1838. * Check is race-prone but harmless.
  1839. */
  1840. max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
  1841. if (zone->nr_reserved_highatomic >= max_managed)
  1842. return;
  1843. spin_lock_irqsave(&zone->lock, flags);
  1844. /* Recheck the nr_reserved_highatomic limit under the lock */
  1845. if (zone->nr_reserved_highatomic >= max_managed)
  1846. goto out_unlock;
  1847. /* Yoink! */
  1848. mt = get_pageblock_migratetype(page);
  1849. if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
  1850. && !is_migrate_cma(mt)) {
  1851. zone->nr_reserved_highatomic += pageblock_nr_pages;
  1852. set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
  1853. move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
  1854. }
  1855. out_unlock:
  1856. spin_unlock_irqrestore(&zone->lock, flags);
  1857. }
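/*
 * Rough illustration, assuming 4KiB pages and 512-page pageblocks: a
 * zone managing 2^20 pages (~4GiB) gives max_managed = 10485 + 512 =
 * 10997 pages, i.e. roughly 21 pageblocks at most can be reserved as
 * MIGRATE_HIGHATOMIC.
 */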
  1858. /*
  1859. * Used when an allocation is about to fail under memory pressure. This
  1860. * potentially hurts the reliability of high-order allocations when under
  1861. * intense memory pressure but failed atomic allocations should be easier
  1862. * to recover from than an OOM.
  1863. *
  1864. * If @force is true, try to unreserve a pageblock even though highatomic
  1865. * pageblock is exhausted.
  1866. */
  1867. static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
  1868. bool force)
  1869. {
  1870. struct zonelist *zonelist = ac->zonelist;
  1871. unsigned long flags;
  1872. struct zoneref *z;
  1873. struct zone *zone;
  1874. struct page *page;
  1875. int order;
  1876. bool ret;
  1877. for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
  1878. ac->nodemask) {
  1879. /*
  1880. * Preserve at least one pageblock unless memory pressure
  1881. * is really high.
  1882. */
  1883. if (!force && zone->nr_reserved_highatomic <=
  1884. pageblock_nr_pages)
  1885. continue;
  1886. spin_lock_irqsave(&zone->lock, flags);
  1887. for (order = 0; order < MAX_ORDER; order++) {
  1888. struct free_area *area = &(zone->free_area[order]);
  1889. page = list_first_entry_or_null(
  1890. &area->free_list[MIGRATE_HIGHATOMIC],
  1891. struct page, lru);
  1892. if (!page)
  1893. continue;
  1894. /*
1895. * In the page freeing path, the migratetype change is racy, so
1896. * we can encounter several free pages in a pageblock
1897. * in this loop although we changed the pageblock type
  1898. * from highatomic to ac->migratetype. So we should
  1899. * adjust the count once.
  1900. */
  1901. if (is_migrate_highatomic_page(page)) {
  1902. /*
  1903. * It should never happen but changes to
  1904. * locking could inadvertently allow a per-cpu
  1905. * drain to add pages to MIGRATE_HIGHATOMIC
  1906. * while unreserving so be safe and watch for
  1907. * underflows.
  1908. */
  1909. zone->nr_reserved_highatomic -= min(
  1910. pageblock_nr_pages,
  1911. zone->nr_reserved_highatomic);
  1912. }
  1913. /*
  1914. * Convert to ac->migratetype and avoid the normal
  1915. * pageblock stealing heuristics. Minimally, the caller
  1916. * is doing the work and needs the pages. More
  1917. * importantly, if the block was always converted to
  1918. * MIGRATE_UNMOVABLE or another type then the number
  1919. * of pageblocks that cannot be completely freed
  1920. * may increase.
  1921. */
  1922. set_pageblock_migratetype(page, ac->migratetype);
  1923. ret = move_freepages_block(zone, page, ac->migratetype,
  1924. NULL);
  1925. if (ret) {
  1926. spin_unlock_irqrestore(&zone->lock, flags);
  1927. return ret;
  1928. }
  1929. }
  1930. spin_unlock_irqrestore(&zone->lock, flags);
  1931. }
  1932. return false;
  1933. }
  1934. /*
  1935. * Try finding a free buddy page on the fallback list and put it on the free
  1936. * list of requested migratetype, possibly along with other pages from the same
  1937. * block, depending on fragmentation avoidance heuristics. Returns true if
  1938. * fallback was found so that __rmqueue_smallest() can grab it.
  1939. *
  1940. * The use of signed ints for order and current_order is a deliberate
  1941. * deviation from the rest of this file, to make the for loop
  1942. * condition simpler.
  1943. */
  1944. static __always_inline bool
  1945. __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
  1946. {
  1947. struct free_area *area;
  1948. int current_order;
  1949. struct page *page;
  1950. int fallback_mt;
  1951. bool can_steal;
  1952. /*
  1953. * Find the largest available free page in the other list. This roughly
  1954. * approximates finding the pageblock with the most free pages, which
  1955. * would be too costly to do exactly.
  1956. */
  1957. for (current_order = MAX_ORDER - 1; current_order >= order;
  1958. --current_order) {
  1959. area = &(zone->free_area[current_order]);
  1960. fallback_mt = find_suitable_fallback(area, current_order,
  1961. start_migratetype, false, &can_steal);
  1962. if (fallback_mt == -1)
  1963. continue;
  1964. /*
  1965. * We cannot steal all free pages from the pageblock and the
  1966. * requested migratetype is movable. In that case it's better to
  1967. * steal and split the smallest available page instead of the
  1968. * largest available page, because even if the next movable
  1969. * allocation falls back into a different pageblock than this
  1970. * one, it won't cause permanent fragmentation.
  1971. */
  1972. if (!can_steal && start_migratetype == MIGRATE_MOVABLE
  1973. && current_order > order)
  1974. goto find_smallest;
  1975. goto do_steal;
  1976. }
  1977. return false;
  1978. find_smallest:
  1979. for (current_order = order; current_order < MAX_ORDER;
  1980. current_order++) {
  1981. area = &(zone->free_area[current_order]);
  1982. fallback_mt = find_suitable_fallback(area, current_order,
  1983. start_migratetype, false, &can_steal);
  1984. if (fallback_mt != -1)
  1985. break;
  1986. }
  1987. /*
  1988. * This should not happen - we already found a suitable fallback
  1989. * when looking for the largest page.
  1990. */
  1991. VM_BUG_ON(current_order == MAX_ORDER);
  1992. do_steal:
  1993. page = list_first_entry(&area->free_list[fallback_mt],
  1994. struct page, lru);
  1995. steal_suitable_fallback(zone, page, start_migratetype, can_steal);
  1996. trace_mm_page_alloc_extfrag(page, order, current_order,
  1997. start_migratetype, fallback_mt);
  1998. return true;
  1999. }
  2000. /*
  2001. * Do the hard work of removing an element from the buddy allocator.
  2002. * Call me with the zone->lock already held.
  2003. */
  2004. static __always_inline struct page *
  2005. __rmqueue(struct zone *zone, unsigned int order, int migratetype)
  2006. {
  2007. struct page *page;
  2008. retry:
  2009. page = __rmqueue_smallest(zone, order, migratetype);
  2010. if (unlikely(!page)) {
  2011. if (migratetype == MIGRATE_MOVABLE)
  2012. page = __rmqueue_cma_fallback(zone, order);
  2013. if (!page && __rmqueue_fallback(zone, order, migratetype))
  2014. goto retry;
  2015. }
  2016. trace_mm_page_alloc_zone_locked(page, order, migratetype);
  2017. return page;
  2018. }
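/*
 * Order of attempts above: the exact migratetype via
 * __rmqueue_smallest() first; for MIGRATE_MOVABLE requests the CMA
 * reserve is tried next via __rmqueue_cma_fallback(); finally
 * __rmqueue_fallback() may steal pages from another migratetype's
 * pageblock, after which the whole sequence is retried.
 */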
  2019. /*
  2020. * Obtain a specified number of elements from the buddy allocator, all under
  2021. * a single hold of the lock, for efficiency. Add them to the supplied list.
  2022. * Returns the number of new pages which were placed at *list.
  2023. */
  2024. static int rmqueue_bulk(struct zone *zone, unsigned int order,
  2025. unsigned long count, struct list_head *list,
  2026. int migratetype)
  2027. {
  2028. int i, alloced = 0;
  2029. spin_lock(&zone->lock);
  2030. for (i = 0; i < count; ++i) {
  2031. struct page *page = __rmqueue(zone, order, migratetype);
  2032. if (unlikely(page == NULL))
  2033. break;
  2034. if (unlikely(check_pcp_refill(page)))
  2035. continue;
  2036. /*
  2037. * Split buddy pages returned by expand() are received here in
  2038. * physical page order. The page is added to the tail of
2039. * the caller's list. From the caller's perspective, the linked list
2040. * is ordered by page number under some conditions. This is
2041. * useful for IO devices that can forward from the head in
2042. * physical page order, and for IO devices that can merge IO
2043. * requests if the physical pages are ordered properly.
  2045. */
  2046. list_add_tail(&page->lru, list);
  2047. alloced++;
  2048. if (is_migrate_cma(get_pcppage_migratetype(page)))
  2049. __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
  2050. -(1 << order));
  2051. }
  2052. /*
  2053. * i pages were removed from the buddy list even if some leak due
  2054. * to check_pcp_refill failing so adjust NR_FREE_PAGES based
  2055. * on i. Do not confuse with 'alloced' which is the number of
  2056. * pages added to the pcp list.
  2057. */
  2058. __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
  2059. spin_unlock(&zone->lock);
  2060. return alloced;
  2061. }
  2062. #ifdef CONFIG_NUMA
  2063. /*
  2064. * Called from the vmstat counter updater to drain pagesets of this
  2065. * currently executing processor on remote nodes after they have
  2066. * expired.
  2067. *
  2068. * Note that this function must be called with the thread pinned to
  2069. * a single processor.
  2070. */
  2071. void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
  2072. {
  2073. unsigned long flags;
  2074. int to_drain, batch;
  2075. local_irq_save(flags);
  2076. batch = READ_ONCE(pcp->batch);
  2077. to_drain = min(pcp->count, batch);
  2078. if (to_drain > 0) {
  2079. free_pcppages_bulk(zone, to_drain, pcp);
  2080. pcp->count -= to_drain;
  2081. }
  2082. local_irq_restore(flags);
  2083. }
  2084. #endif
  2085. /*
  2086. * Drain pcplists of the indicated processor and zone.
  2087. *
  2088. * The processor must either be the current processor and the
  2089. * thread pinned to the current processor or a processor that
  2090. * is not online.
  2091. */
  2092. static void drain_pages_zone(unsigned int cpu, struct zone *zone)
  2093. {
  2094. unsigned long flags;
  2095. struct per_cpu_pageset *pset;
  2096. struct per_cpu_pages *pcp;
  2097. local_irq_save(flags);
  2098. pset = per_cpu_ptr(zone->pageset, cpu);
  2099. pcp = &pset->pcp;
  2100. if (pcp->count) {
  2101. free_pcppages_bulk(zone, pcp->count, pcp);
  2102. pcp->count = 0;
  2103. }
  2104. local_irq_restore(flags);
  2105. }
  2106. /*
  2107. * Drain pcplists of all zones on the indicated processor.
  2108. *
  2109. * The processor must either be the current processor and the
  2110. * thread pinned to the current processor or a processor that
  2111. * is not online.
  2112. */
  2113. static void drain_pages(unsigned int cpu)
  2114. {
  2115. struct zone *zone;
  2116. for_each_populated_zone(zone) {
  2117. drain_pages_zone(cpu, zone);
  2118. }
  2119. }
  2120. /*
  2121. * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  2122. *
  2123. * The CPU has to be pinned. When zone parameter is non-NULL, spill just
  2124. * the single zone's pages.
  2125. */
  2126. void drain_local_pages(struct zone *zone)
  2127. {
  2128. int cpu = smp_processor_id();
  2129. if (zone)
  2130. drain_pages_zone(cpu, zone);
  2131. else
  2132. drain_pages(cpu);
  2133. }
  2134. static void drain_local_pages_wq(struct work_struct *work)
  2135. {
  2136. /*
  2137. * drain_all_pages doesn't use proper cpu hotplug protection so
  2138. * we can race with cpu offline when the WQ can move this from
  2139. * a cpu pinned worker to an unbound one. We can operate on a different
2140. * cpu, which is all right, but we also have to make sure not to move to
  2141. * a different one.
  2142. */
  2143. preempt_disable();
  2144. drain_local_pages(NULL);
  2145. preempt_enable();
  2146. }
  2147. /*
  2148. * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
  2149. *
  2150. * When zone parameter is non-NULL, spill just the single zone's pages.
  2151. *
  2152. * Note that this can be extremely slow as the draining happens in a workqueue.
  2153. */
  2154. void drain_all_pages(struct zone *zone)
  2155. {
  2156. int cpu;
  2157. /*
2158. * Allocate in the BSS so we won't require allocation in
  2159. * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
  2160. */
  2161. static cpumask_t cpus_with_pcps;
  2162. /*
  2163. * Make sure nobody triggers this path before mm_percpu_wq is fully
  2164. * initialized.
  2165. */
  2166. if (WARN_ON_ONCE(!mm_percpu_wq))
  2167. return;
  2168. /*
  2169. * Do not drain if one is already in progress unless it's specific to
  2170. * a zone. Such callers are primarily CMA and memory hotplug and need
  2171. * the drain to be complete when the call returns.
  2172. */
  2173. if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
  2174. if (!zone)
  2175. return;
  2176. mutex_lock(&pcpu_drain_mutex);
  2177. }
  2178. /*
2179. * We don't care about racing with a CPU hotplug event,
2180. * as the offline notification will cause the notified
2181. * cpu to drain that CPU's pcps, and on_each_cpu_mask
2182. * disables preemption as part of its processing.
  2183. */
  2184. for_each_online_cpu(cpu) {
  2185. struct per_cpu_pageset *pcp;
  2186. struct zone *z;
  2187. bool has_pcps = false;
  2188. if (zone) {
  2189. pcp = per_cpu_ptr(zone->pageset, cpu);
  2190. if (pcp->pcp.count)
  2191. has_pcps = true;
  2192. } else {
  2193. for_each_populated_zone(z) {
  2194. pcp = per_cpu_ptr(z->pageset, cpu);
  2195. if (pcp->pcp.count) {
  2196. has_pcps = true;
  2197. break;
  2198. }
  2199. }
  2200. }
  2201. if (has_pcps)
  2202. cpumask_set_cpu(cpu, &cpus_with_pcps);
  2203. else
  2204. cpumask_clear_cpu(cpu, &cpus_with_pcps);
  2205. }
  2206. for_each_cpu(cpu, &cpus_with_pcps) {
  2207. struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
  2208. INIT_WORK(work, drain_local_pages_wq);
  2209. queue_work_on(cpu, mm_percpu_wq, work);
  2210. }
  2211. for_each_cpu(cpu, &cpus_with_pcps)
  2212. flush_work(per_cpu_ptr(&pcpu_drain, cpu));
  2213. mutex_unlock(&pcpu_drain_mutex);
  2214. }
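/*
 * Illustrative sketch (an assumption, not code from this file): zone-specific
 * callers such as memory hotplug or CMA typically isolate a pageblock range
 * first and then flush the pcplists so that already-freed pages in that range
 * actually land on the buddy free lists, roughly:
 *
 *	start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, false);
 *	drain_all_pages(zone);
 *	... migrate or offline the isolated range ...
 */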
  2215. #ifdef CONFIG_HIBERNATION
  2216. /*
  2217. * Touch the watchdog for every WD_PAGE_COUNT pages.
  2218. */
  2219. #define WD_PAGE_COUNT (128*1024)
  2220. void mark_free_pages(struct zone *zone)
  2221. {
  2222. unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
  2223. unsigned long flags;
  2224. unsigned int order, t;
  2225. struct page *page;
  2226. if (zone_is_empty(zone))
  2227. return;
  2228. spin_lock_irqsave(&zone->lock, flags);
  2229. max_zone_pfn = zone_end_pfn(zone);
  2230. for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
  2231. if (pfn_valid(pfn)) {
  2232. page = pfn_to_page(pfn);
  2233. if (!--page_count) {
  2234. touch_nmi_watchdog();
  2235. page_count = WD_PAGE_COUNT;
  2236. }
  2237. if (page_zone(page) != zone)
  2238. continue;
  2239. if (!swsusp_page_is_forbidden(page))
  2240. swsusp_unset_page_free(page);
  2241. }
  2242. for_each_migratetype_order(order, t) {
  2243. list_for_each_entry(page,
  2244. &zone->free_area[order].free_list[t], lru) {
  2245. unsigned long i;
  2246. pfn = page_to_pfn(page);
  2247. for (i = 0; i < (1UL << order); i++) {
  2248. if (!--page_count) {
  2249. touch_nmi_watchdog();
  2250. page_count = WD_PAGE_COUNT;
  2251. }
  2252. swsusp_set_page_free(pfn_to_page(pfn + i));
  2253. }
  2254. }
  2255. }
  2256. spin_unlock_irqrestore(&zone->lock, flags);
  2257. }
#endif /* CONFIG_HIBERNATION */
  2259. static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
  2260. {
  2261. int migratetype;
  2262. if (!free_pcp_prepare(page))
  2263. return false;
  2264. migratetype = get_pfnblock_migratetype(page, pfn);
  2265. set_pcppage_migratetype(page, migratetype);
  2266. return true;
  2267. }
  2268. static void free_unref_page_commit(struct page *page, unsigned long pfn)
  2269. {
  2270. struct zone *zone = page_zone(page);
  2271. struct per_cpu_pages *pcp;
  2272. int migratetype;
  2273. migratetype = get_pcppage_migratetype(page);
  2274. __count_vm_event(PGFREE);
  2275. /*
  2276. * We only track unmovable, reclaimable and movable on pcp lists.
  2277. * Free ISOLATE pages back to the allocator because they are being
  2278. * offlined but treat HIGHATOMIC as movable pages so we can get those
  2279. * areas back if necessary. Otherwise, we may have to free
  2280. * excessively into the page allocator
  2281. */
  2282. if (migratetype >= MIGRATE_PCPTYPES) {
  2283. if (unlikely(is_migrate_isolate(migratetype))) {
  2284. free_one_page(zone, page, pfn, 0, migratetype);
  2285. return;
  2286. }
  2287. migratetype = MIGRATE_MOVABLE;
  2288. }
  2289. pcp = &this_cpu_ptr(zone->pageset)->pcp;
  2290. list_add(&page->lru, &pcp->lists[migratetype]);
  2291. pcp->count++;
  2292. if (pcp->count >= pcp->high) {
  2293. unsigned long batch = READ_ONCE(pcp->batch);
  2294. free_pcppages_bulk(zone, batch, pcp);
  2295. pcp->count -= batch;
  2296. }
  2297. }
  2298. /*
  2299. * Free a 0-order page
  2300. */
  2301. void free_unref_page(struct page *page)
  2302. {
  2303. unsigned long flags;
  2304. unsigned long pfn = page_to_pfn(page);
  2305. if (!free_unref_page_prepare(page, pfn))
  2306. return;
  2307. local_irq_save(flags);
  2308. free_unref_page_commit(page, pfn);
  2309. local_irq_restore(flags);
  2310. }
  2311. /*
  2312. * Free a list of 0-order pages
  2313. */
  2314. void free_unref_page_list(struct list_head *list)
  2315. {
  2316. struct page *page, *next;
  2317. unsigned long flags, pfn;
  2318. int batch_count = 0;
  2319. /* Prepare pages for freeing */
  2320. list_for_each_entry_safe(page, next, list, lru) {
  2321. pfn = page_to_pfn(page);
  2322. if (!free_unref_page_prepare(page, pfn))
  2323. list_del(&page->lru);
  2324. set_page_private(page, pfn);
  2325. }
  2326. local_irq_save(flags);
  2327. list_for_each_entry_safe(page, next, list, lru) {
  2328. unsigned long pfn = page_private(page);
  2329. set_page_private(page, 0);
  2330. trace_mm_page_free_batched(page);
  2331. free_unref_page_commit(page, pfn);
  2332. /*
  2333. * Guard against excessive IRQ disabled times when we get
  2334. * a large list of pages to free.
  2335. */
  2336. if (++batch_count == SWAP_CLUSTER_MAX) {
  2337. local_irq_restore(flags);
  2338. batch_count = 0;
  2339. local_irq_save(flags);
  2340. }
  2341. }
  2342. local_irq_restore(flags);
  2343. }
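/*
 * Illustrative sketch (an assumption, not code from this file): callers such
 * as release_pages() batch order-0 pages on a private list threaded through
 * page->lru and hand the whole list over in one call, roughly:
 *
 *	LIST_HEAD(pages_to_free);
 *	...
 *	list_add_tail(&page->lru, &pages_to_free);
 *	...
 *	free_unref_page_list(&pages_to_free);
 */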
  2344. /*
  2345. * split_page takes a non-compound higher-order page, and splits it into
  2346. * n (1<<order) sub-pages: page[0..n]
  2347. * Each sub-page must be freed individually.
  2348. *
  2349. * Note: this is probably too low level an operation for use in drivers.
  2350. * Please consult with lkml before using this in your driver.
  2351. */
  2352. void split_page(struct page *page, unsigned int order)
  2353. {
  2354. int i;
  2355. VM_BUG_ON_PAGE(PageCompound(page), page);
  2356. VM_BUG_ON_PAGE(!page_count(page), page);
  2357. for (i = 1; i < (1 << order); i++)
  2358. set_page_refcounted(page + i);
  2359. split_page_owner(page, order);
  2360. }
  2361. EXPORT_SYMBOL_GPL(split_page);
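/*
 * Illustrative sketch (an assumption, not code from this file): a user of
 * split_page() allocates a non-compound higher-order page, splits it and then
 * frees each sub-page on its own, roughly:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *	int i;
 *
 *	if (page) {
 *		split_page(page, 2);
 *		for (i = 0; i < (1 << 2); i++)
 *			__free_page(page + i);
 *	}
 */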
  2362. int __isolate_free_page(struct page *page, unsigned int order)
  2363. {
  2364. unsigned long watermark;
  2365. struct zone *zone;
  2366. int mt;
  2367. BUG_ON(!PageBuddy(page));
  2368. zone = page_zone(page);
  2369. mt = get_pageblock_migratetype(page);
  2370. if (!is_migrate_isolate(mt)) {
  2371. /*
  2372. * Obey watermarks as if the page was being allocated. We can
  2373. * emulate a high-order watermark check with a raised order-0
  2374. * watermark, because we already know our high-order page
  2375. * exists.
  2376. */
  2377. watermark = min_wmark_pages(zone) + (1UL << order);
  2378. if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
  2379. return 0;
  2380. __mod_zone_freepage_state(zone, -(1UL << order), mt);
  2381. }
  2382. /* Remove page from free list */
  2383. list_del(&page->lru);
  2384. zone->free_area[order].nr_free--;
  2385. rmv_page_order(page);
  2386. /*
  2387. * Set the pageblock if the isolated page is at least half of a
  2388. * pageblock
  2389. */
  2390. if (order >= pageblock_order - 1) {
  2391. struct page *endpage = page + (1 << order) - 1;
  2392. for (; page < endpage; page += pageblock_nr_pages) {
  2393. int mt = get_pageblock_migratetype(page);
  2394. if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
  2395. && !is_migrate_highatomic(mt))
  2396. set_pageblock_migratetype(page,
  2397. MIGRATE_MOVABLE);
  2398. }
  2399. }
  2400. return 1UL << order;
  2401. }
  2402. /*
  2403. * Update NUMA hit/miss statistics
  2404. *
  2405. * Must be called with interrupts disabled.
  2406. */
  2407. static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
  2408. {
  2409. #ifdef CONFIG_NUMA
  2410. enum numa_stat_item local_stat = NUMA_LOCAL;
  2411. /* skip numa counters update if numa stats is disabled */
  2412. if (!static_branch_likely(&vm_numa_stat_key))
  2413. return;
  2414. if (z->node != numa_node_id())
  2415. local_stat = NUMA_OTHER;
  2416. if (z->node == preferred_zone->node)
  2417. __inc_numa_state(z, NUMA_HIT);
  2418. else {
  2419. __inc_numa_state(z, NUMA_MISS);
  2420. __inc_numa_state(preferred_zone, NUMA_FOREIGN);
  2421. }
  2422. __inc_numa_state(z, local_stat);
  2423. #endif
  2424. }
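/*
 * Illustrative note (an assumption): the NUMA_HIT/NUMA_MISS/NUMA_FOREIGN and
 * NUMA_LOCAL/NUMA_OTHER events counted above are what is ultimately reported
 * as numa_hit, numa_miss, numa_foreign, local_node and other_node in
 * /sys/devices/system/node/nodeN/numastat.
 */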
  2425. /* Remove page from the per-cpu list, caller must protect the list */
  2426. static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
  2427. struct per_cpu_pages *pcp,
  2428. struct list_head *list)
  2429. {
  2430. struct page *page;
  2431. do {
  2432. if (list_empty(list)) {
  2433. pcp->count += rmqueue_bulk(zone, 0,
  2434. pcp->batch, list,
  2435. migratetype);
  2436. if (unlikely(list_empty(list)))
  2437. return NULL;
  2438. }
  2439. page = list_first_entry(list, struct page, lru);
  2440. list_del(&page->lru);
  2441. pcp->count--;
  2442. } while (check_new_pcp(page));
  2443. return page;
  2444. }
  2445. /* Lock and remove page from the per-cpu list */
  2446. static struct page *rmqueue_pcplist(struct zone *preferred_zone,
  2447. struct zone *zone, unsigned int order,
  2448. gfp_t gfp_flags, int migratetype)
  2449. {
  2450. struct per_cpu_pages *pcp;
  2451. struct list_head *list;
  2452. struct page *page;
  2453. unsigned long flags;
  2454. local_irq_save(flags);
  2455. pcp = &this_cpu_ptr(zone->pageset)->pcp;
  2456. list = &pcp->lists[migratetype];
  2457. page = __rmqueue_pcplist(zone, migratetype, pcp, list);
  2458. if (page) {
  2459. __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
  2460. zone_statistics(preferred_zone, zone);
  2461. }
  2462. local_irq_restore(flags);
  2463. return page;
  2464. }
  2465. /*
  2466. * Allocate a page from the given zone. Use pcplists for order-0 allocations.
  2467. */
  2468. static inline
  2469. struct page *rmqueue(struct zone *preferred_zone,
  2470. struct zone *zone, unsigned int order,
  2471. gfp_t gfp_flags, unsigned int alloc_flags,
  2472. int migratetype)
  2473. {
  2474. unsigned long flags;
  2475. struct page *page;
  2476. if (likely(order == 0)) {
  2477. page = rmqueue_pcplist(preferred_zone, zone, order,
  2478. gfp_flags, migratetype);
  2479. goto out;
  2480. }
  2481. /*
  2482. * We most definitely don't want callers attempting to
  2483. * allocate greater than order-1 page units with __GFP_NOFAIL.
  2484. */
  2485. WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
  2486. spin_lock_irqsave(&zone->lock, flags);
  2487. do {
  2488. page = NULL;
  2489. if (alloc_flags & ALLOC_HARDER) {
  2490. page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
  2491. if (page)
  2492. trace_mm_page_alloc_zone_locked(page, order, migratetype);
  2493. }
  2494. if (!page)
  2495. page = __rmqueue(zone, order, migratetype);
  2496. } while (page && check_new_pages(page, order));
  2497. spin_unlock(&zone->lock);
  2498. if (!page)
  2499. goto failed;
  2500. __mod_zone_freepage_state(zone, -(1 << order),
  2501. get_pcppage_migratetype(page));
  2502. __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
  2503. zone_statistics(preferred_zone, zone);
  2504. local_irq_restore(flags);
  2505. out:
  2506. VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
  2507. return page;
  2508. failed:
  2509. local_irq_restore(flags);
  2510. return NULL;
  2511. }
  2512. #ifdef CONFIG_FAIL_PAGE_ALLOC
  2513. static struct {
  2514. struct fault_attr attr;
  2515. bool ignore_gfp_highmem;
  2516. bool ignore_gfp_reclaim;
  2517. u32 min_order;
  2518. } fail_page_alloc = {
  2519. .attr = FAULT_ATTR_INITIALIZER,
  2520. .ignore_gfp_reclaim = true,
  2521. .ignore_gfp_highmem = true,
  2522. .min_order = 1,
  2523. };
  2524. static int __init setup_fail_page_alloc(char *str)
  2525. {
  2526. return setup_fault_attr(&fail_page_alloc.attr, str);
  2527. }
  2528. __setup("fail_page_alloc=", setup_fail_page_alloc);
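/*
 * Illustrative usage (an assumption, not code from this file): the fault
 * injection above is configured either on the kernel command line, e.g.
 *
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 *
 * or at run time via debugfs, e.g.
 *
 *	echo 10 > /sys/kernel/debug/fail_page_alloc/probability
 *	echo -1 > /sys/kernel/debug/fail_page_alloc/times
 */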
  2529. static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  2530. {
  2531. if (order < fail_page_alloc.min_order)
  2532. return false;
  2533. if (gfp_mask & __GFP_NOFAIL)
  2534. return false;
  2535. if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
  2536. return false;
  2537. if (fail_page_alloc.ignore_gfp_reclaim &&
  2538. (gfp_mask & __GFP_DIRECT_RECLAIM))
  2539. return false;
  2540. return should_fail(&fail_page_alloc.attr, 1 << order);
  2541. }
  2542. #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
  2543. static int __init fail_page_alloc_debugfs(void)
  2544. {
  2545. umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
  2546. struct dentry *dir;
  2547. dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
  2548. &fail_page_alloc.attr);
  2549. if (IS_ERR(dir))
  2550. return PTR_ERR(dir);
  2551. if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
  2552. &fail_page_alloc.ignore_gfp_reclaim))
  2553. goto fail;
  2554. if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
  2555. &fail_page_alloc.ignore_gfp_highmem))
  2556. goto fail;
  2557. if (!debugfs_create_u32("min-order", mode, dir,
  2558. &fail_page_alloc.min_order))
  2559. goto fail;
  2560. return 0;
  2561. fail:
  2562. debugfs_remove_recursive(dir);
  2563. return -ENOMEM;
  2564. }
  2565. late_initcall(fail_page_alloc_debugfs);
  2566. #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
  2567. #else /* CONFIG_FAIL_PAGE_ALLOC */
  2568. static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  2569. {
  2570. return false;
  2571. }
  2572. #endif /* CONFIG_FAIL_PAGE_ALLOC */
/*
 * Return true if free base pages are above 'mark'. For high-order checks it
 * will return true if the order-0 watermark is reached and there is at least
 * one free page of a suitable size. Checking now avoids taking the zone lock
 * to check in the allocation paths if no pages are free.
 */
  2579. bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
  2580. int classzone_idx, unsigned int alloc_flags,
  2581. long free_pages)
  2582. {
  2583. long min = mark;
  2584. int o;
  2585. const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
  2586. /* free_pages may go negative - that's OK */
  2587. free_pages -= (1 << order) - 1;
  2588. if (alloc_flags & ALLOC_HIGH)
  2589. min -= min / 2;
  2590. /*
  2591. * If the caller does not have rights to ALLOC_HARDER then subtract
  2592. * the high-atomic reserves. This will over-estimate the size of the
  2593. * atomic reserve but it avoids a search.
  2594. */
  2595. if (likely(!alloc_harder)) {
  2596. free_pages -= z->nr_reserved_highatomic;
  2597. } else {
  2598. /*
  2599. * OOM victims can try even harder than normal ALLOC_HARDER
  2600. * users on the grounds that it's definitely going to be in
  2601. * the exit path shortly and free memory. Any allocation it
  2602. * makes during the free path will be small and short-lived.
  2603. */
  2604. if (alloc_flags & ALLOC_OOM)
  2605. min -= min / 2;
  2606. else
  2607. min -= min / 4;
  2608. }
  2609. #ifdef CONFIG_CMA
  2610. /* If allocation can't use CMA areas don't use free CMA pages */
  2611. if (!(alloc_flags & ALLOC_CMA))
  2612. free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
  2613. #endif
  2614. /*
  2615. * Check watermarks for an order-0 allocation request. If these
  2616. * are not met, then a high-order request also cannot go ahead
  2617. * even if a suitable page happened to be free.
  2618. */
  2619. if (free_pages <= min + z->lowmem_reserve[classzone_idx])
  2620. return false;
  2621. /* If this is an order-0 request then the watermark is fine */
  2622. if (!order)
  2623. return true;
  2624. /* For a high-order request, check at least one suitable page is free */
  2625. for (o = order; o < MAX_ORDER; o++) {
  2626. struct free_area *area = &z->free_area[o];
  2627. int mt;
  2628. if (!area->nr_free)
  2629. continue;
  2630. for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
  2631. if (!list_empty(&area->free_list[mt]))
  2632. return true;
  2633. }
  2634. #ifdef CONFIG_CMA
  2635. if ((alloc_flags & ALLOC_CMA) &&
  2636. !list_empty(&area->free_list[MIGRATE_CMA])) {
  2637. return true;
  2638. }
  2639. #endif
  2640. if (alloc_harder &&
  2641. !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
  2642. return true;
  2643. }
  2644. return false;
  2645. }
  2646. bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
  2647. int classzone_idx, unsigned int alloc_flags)
  2648. {
  2649. return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
  2650. zone_page_state(z, NR_FREE_PAGES));
  2651. }
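/*
 * Illustrative sketch (an assumption, not code from this file): callers pick
 * one of the per-zone watermarks as 'mark', e.g. a conservative order-0 check
 * against the minimum watermark:
 *
 *	if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
 *			      zone_idx(zone), 0))
 *		... a single-page allocation should be able to proceed ...
 */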
  2652. static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
  2653. unsigned long mark, int classzone_idx, unsigned int alloc_flags)
  2654. {
  2655. long free_pages = zone_page_state(z, NR_FREE_PAGES);
  2656. long cma_pages = 0;
  2657. #ifdef CONFIG_CMA
  2658. /* If allocation can't use CMA areas don't use free CMA pages */
  2659. if (!(alloc_flags & ALLOC_CMA))
  2660. cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
  2661. #endif
	/*
	 * Fast check for order-0 only. If this fails then the reserves
	 * need to be calculated. There is a corner case where the check
	 * passes but only the high-order atomic reserves are free. If
	 * the caller is !atomic then it'll uselessly search the free
	 * list. That corner case is then slower but it is harmless.
	 */
  2669. if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
  2670. return true;
  2671. return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
  2672. free_pages);
  2673. }
  2674. bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
  2675. unsigned long mark, int classzone_idx)
  2676. {
  2677. long free_pages = zone_page_state(z, NR_FREE_PAGES);
  2678. if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
  2679. free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
  2680. return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
  2681. free_pages);
  2682. }
  2683. #ifdef CONFIG_NUMA
  2684. static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  2685. {
  2686. return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
  2687. RECLAIM_DISTANCE;
  2688. }
  2689. #else /* CONFIG_NUMA */
  2690. static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  2691. {
  2692. return true;
  2693. }
  2694. #endif /* CONFIG_NUMA */
  2695. /*
  2696. * get_page_from_freelist goes through the zonelist trying to allocate
  2697. * a page.
  2698. */
  2699. static struct page *
  2700. get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
  2701. const struct alloc_context *ac)
  2702. {
  2703. struct zoneref *z = ac->preferred_zoneref;
  2704. struct zone *zone;
  2705. struct pglist_data *last_pgdat_dirty_limit = NULL;
  2706. /*
  2707. * Scan zonelist, looking for a zone with enough free.
  2708. * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
  2709. */
  2710. for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
  2711. ac->nodemask) {
  2712. struct page *page;
  2713. unsigned long mark;
  2714. if (cpusets_enabled() &&
  2715. (alloc_flags & ALLOC_CPUSET) &&
  2716. !__cpuset_zone_allowed(zone, gfp_mask))
  2717. continue;
  2718. /*
  2719. * When allocating a page cache page for writing, we
  2720. * want to get it from a node that is within its dirty
  2721. * limit, such that no single node holds more than its
  2722. * proportional share of globally allowed dirty pages.
  2723. * The dirty limits take into account the node's
  2724. * lowmem reserves and high watermark so that kswapd
  2725. * should be able to balance it without having to
  2726. * write pages from its LRU list.
  2727. *
  2728. * XXX: For now, allow allocations to potentially
  2729. * exceed the per-node dirty limit in the slowpath
  2730. * (spread_dirty_pages unset) before going into reclaim,
  2731. * which is important when on a NUMA setup the allowed
  2732. * nodes are together not big enough to reach the
  2733. * global limit. The proper fix for these situations
  2734. * will require awareness of nodes in the
  2735. * dirty-throttling and the flusher threads.
  2736. */
  2737. if (ac->spread_dirty_pages) {
  2738. if (last_pgdat_dirty_limit == zone->zone_pgdat)
  2739. continue;
  2740. if (!node_dirty_ok(zone->zone_pgdat)) {
  2741. last_pgdat_dirty_limit = zone->zone_pgdat;
  2742. continue;
  2743. }
  2744. }
  2745. mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
  2746. if (!zone_watermark_fast(zone, order, mark,
  2747. ac_classzone_idx(ac), alloc_flags)) {
  2748. int ret;
  2749. /* Checked here to keep the fast path fast */
  2750. BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
  2751. if (alloc_flags & ALLOC_NO_WATERMARKS)
  2752. goto try_this_zone;
  2753. if (node_reclaim_mode == 0 ||
  2754. !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
  2755. continue;
  2756. ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
  2757. switch (ret) {
  2758. case NODE_RECLAIM_NOSCAN:
  2759. /* did not scan */
  2760. continue;
  2761. case NODE_RECLAIM_FULL:
  2762. /* scanned but unreclaimable */
  2763. continue;
  2764. default:
  2765. /* did we reclaim enough */
  2766. if (zone_watermark_ok(zone, order, mark,
  2767. ac_classzone_idx(ac), alloc_flags))
  2768. goto try_this_zone;
  2769. continue;
  2770. }
  2771. }
  2772. try_this_zone:
  2773. page = rmqueue(ac->preferred_zoneref->zone, zone, order,
  2774. gfp_mask, alloc_flags, ac->migratetype);
  2775. if (page) {
  2776. prep_new_page(page, order, gfp_mask, alloc_flags);
  2777. /*
  2778. * If this is a high-order atomic allocation then check
  2779. * if the pageblock should be reserved for the future
  2780. */
  2781. if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
  2782. reserve_highatomic_pageblock(page, zone, order);
  2783. return page;
  2784. }
  2785. }
  2786. return NULL;
  2787. }
  2788. /*
  2789. * Large machines with many possible nodes should not always dump per-node
  2790. * meminfo in irq context.
  2791. */
  2792. static inline bool should_suppress_show_mem(void)
  2793. {
  2794. bool ret = false;
  2795. #if NODES_SHIFT > 8
  2796. ret = in_interrupt();
  2797. #endif
  2798. return ret;
  2799. }
  2800. static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
  2801. {
  2802. unsigned int filter = SHOW_MEM_FILTER_NODES;
  2803. static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);
  2804. if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs))
  2805. return;
  2806. /*
  2807. * This documents exceptions given to allocations in certain
  2808. * contexts that are allowed to allocate outside current's set
  2809. * of allowed nodes.
  2810. */
  2811. if (!(gfp_mask & __GFP_NOMEMALLOC))
  2812. if (tsk_is_oom_victim(current) ||
  2813. (current->flags & (PF_MEMALLOC | PF_EXITING)))
  2814. filter &= ~SHOW_MEM_FILTER_NODES;
  2815. if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
  2816. filter &= ~SHOW_MEM_FILTER_NODES;
  2817. show_mem(filter, nodemask);
  2818. }
  2819. void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
  2820. {
  2821. struct va_format vaf;
  2822. va_list args;
  2823. static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
  2824. DEFAULT_RATELIMIT_BURST);
  2825. if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
  2826. return;
  2827. va_start(args, fmt);
  2828. vaf.fmt = fmt;
  2829. vaf.va = &args;
  2830. pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl\n",
  2831. current->comm, &vaf, gfp_mask, &gfp_mask,
  2832. nodemask_pr_args(nodemask));
  2833. va_end(args);
  2834. cpuset_print_current_mems_allowed();
  2835. dump_stack();
  2836. warn_alloc_show_mem(gfp_mask, nodemask);
  2837. }
  2838. static inline struct page *
  2839. __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
  2840. unsigned int alloc_flags,
  2841. const struct alloc_context *ac)
  2842. {
  2843. struct page *page;
  2844. page = get_page_from_freelist(gfp_mask, order,
  2845. alloc_flags|ALLOC_CPUSET, ac);
  2846. /*
  2847. * fallback to ignore cpuset restriction if our nodes
  2848. * are depleted
  2849. */
  2850. if (!page)
  2851. page = get_page_from_freelist(gfp_mask, order,
  2852. alloc_flags, ac);
  2853. return page;
  2854. }
  2855. static inline struct page *
  2856. __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
  2857. const struct alloc_context *ac, unsigned long *did_some_progress)
  2858. {
  2859. struct oom_control oc = {
  2860. .zonelist = ac->zonelist,
  2861. .nodemask = ac->nodemask,
  2862. .memcg = NULL,
  2863. .gfp_mask = gfp_mask,
  2864. .order = order,
  2865. };
  2866. struct page *page;
  2867. *did_some_progress = 0;
  2868. /*
  2869. * Acquire the oom lock. If that fails, somebody else is
  2870. * making progress for us.
  2871. */
  2872. if (!mutex_trylock(&oom_lock)) {
  2873. *did_some_progress = 1;
  2874. schedule_timeout_uninterruptible(1);
  2875. return NULL;
  2876. }
  2877. /*
  2878. * Go through the zonelist yet one more time, keep very high watermark
  2879. * here, this is only to catch a parallel oom killing, we must fail if
  2880. * we're still under heavy pressure. But make sure that this reclaim
  2881. * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
  2882. * allocation which will never fail due to oom_lock already held.
  2883. */
  2884. page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
  2885. ~__GFP_DIRECT_RECLAIM, order,
  2886. ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
  2887. if (page)
  2888. goto out;
  2889. /* Coredumps can quickly deplete all memory reserves */
  2890. if (current->flags & PF_DUMPCORE)
  2891. goto out;
  2892. /* The OOM killer will not help higher order allocs */
  2893. if (order > PAGE_ALLOC_COSTLY_ORDER)
  2894. goto out;
  2895. /*
  2896. * We have already exhausted all our reclaim opportunities without any
  2897. * success so it is time to admit defeat. We will skip the OOM killer
  2898. * because it is very likely that the caller has a more reasonable
  2899. * fallback than shooting a random task.
  2900. */
  2901. if (gfp_mask & __GFP_RETRY_MAYFAIL)
  2902. goto out;
  2903. /* The OOM killer does not needlessly kill tasks for lowmem */
  2904. if (ac->high_zoneidx < ZONE_NORMAL)
  2905. goto out;
  2906. if (pm_suspended_storage())
  2907. goto out;
  2908. /*
  2909. * XXX: GFP_NOFS allocations should rather fail than rely on
  2910. * other request to make a forward progress.
  2911. * We are in an unfortunate situation where out_of_memory cannot
  2912. * do much for this context but let's try it to at least get
  2913. * access to memory reserved if the current task is killed (see
  2914. * out_of_memory). Once filesystems are ready to handle allocation
  2915. * failures more gracefully we should just bail out here.
  2916. */
  2917. /* The OOM killer may not free memory on a specific node */
  2918. if (gfp_mask & __GFP_THISNODE)
  2919. goto out;
  2920. /* Exhausted what can be done so it's blamo time */
  2921. if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
  2922. *did_some_progress = 1;
  2923. /*
  2924. * Help non-failing allocations by giving them access to memory
  2925. * reserves
  2926. */
  2927. if (gfp_mask & __GFP_NOFAIL)
  2928. page = __alloc_pages_cpuset_fallback(gfp_mask, order,
  2929. ALLOC_NO_WATERMARKS, ac);
  2930. }
  2931. out:
  2932. mutex_unlock(&oom_lock);
  2933. return page;
  2934. }
/*
 * Maximum number of compaction retries with progress before the OOM
 * killer is considered the only way to move forward.
 */
  2939. #define MAX_COMPACT_RETRIES 16
  2940. #ifdef CONFIG_COMPACTION
  2941. /* Try memory compaction for high-order allocations before reclaim */
  2942. static struct page *
  2943. __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
  2944. unsigned int alloc_flags, const struct alloc_context *ac,
  2945. enum compact_priority prio, enum compact_result *compact_result)
  2946. {
  2947. struct page *page;
  2948. unsigned int noreclaim_flag;
  2949. if (!order)
  2950. return NULL;
  2951. noreclaim_flag = memalloc_noreclaim_save();
  2952. *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
  2953. prio);
  2954. memalloc_noreclaim_restore(noreclaim_flag);
  2955. if (*compact_result <= COMPACT_INACTIVE)
  2956. return NULL;
  2957. /*
  2958. * At least in one zone compaction wasn't deferred or skipped, so let's
  2959. * count a compaction stall
  2960. */
  2961. count_vm_event(COMPACTSTALL);
  2962. page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
  2963. if (page) {
  2964. struct zone *zone = page_zone(page);
  2965. zone->compact_blockskip_flush = false;
  2966. compaction_defer_reset(zone, order, true);
  2967. count_vm_event(COMPACTSUCCESS);
  2968. return page;
  2969. }
	/*
	 * It's bad if a compaction run occurs and fails. The most likely
	 * reason is that pages exist, but not enough to satisfy watermarks.
	 */
  2974. count_vm_event(COMPACTFAIL);
  2975. cond_resched();
  2976. return NULL;
  2977. }
  2978. static inline bool
  2979. should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
  2980. enum compact_result compact_result,
  2981. enum compact_priority *compact_priority,
  2982. int *compaction_retries)
  2983. {
  2984. int max_retries = MAX_COMPACT_RETRIES;
  2985. int min_priority;
  2986. bool ret = false;
  2987. int retries = *compaction_retries;
  2988. enum compact_priority priority = *compact_priority;
  2989. if (!order)
  2990. return false;
  2991. if (compaction_made_progress(compact_result))
  2992. (*compaction_retries)++;
	/*
	 * compaction considers all the zones as desperately out of memory,
	 * so it doesn't really make much sense to retry except when the
	 * failure could be caused by insufficient priority
	 */
  2998. if (compaction_failed(compact_result))
  2999. goto check_priority;
	/*
	 * make sure the compaction wasn't deferred or didn't bail out early
	 * due to lock contention before we declare that we should give up.
	 * But do not retry if the given zonelist is not suitable for
	 * compaction.
	 */
  3006. if (compaction_withdrawn(compact_result)) {
  3007. ret = compaction_zonelist_suitable(ac, order, alloc_flags);
  3008. goto out;
  3009. }
  3010. /*
  3011. * !costly requests are much more important than __GFP_RETRY_MAYFAIL
  3012. * costly ones because they are de facto nofail and invoke OOM
  3013. * killer to move on while costly can fail and users are ready
  3014. * to cope with that. 1/4 retries is rather arbitrary but we
  3015. * would need much more detailed feedback from compaction to
  3016. * make a better decision.
  3017. */
  3018. if (order > PAGE_ALLOC_COSTLY_ORDER)
  3019. max_retries /= 4;
  3020. if (*compaction_retries <= max_retries) {
  3021. ret = true;
  3022. goto out;
  3023. }
  3024. /*
  3025. * Make sure there are attempts at the highest priority if we exhausted
  3026. * all retries or failed at the lower priorities.
  3027. */
  3028. check_priority:
  3029. min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
  3030. MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;
  3031. if (*compact_priority > min_priority) {
  3032. (*compact_priority)--;
  3033. *compaction_retries = 0;
  3034. ret = true;
  3035. }
  3036. out:
  3037. trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
  3038. return ret;
  3039. }
  3040. #else
  3041. static inline struct page *
  3042. __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
  3043. unsigned int alloc_flags, const struct alloc_context *ac,
  3044. enum compact_priority prio, enum compact_result *compact_result)
  3045. {
  3046. *compact_result = COMPACT_SKIPPED;
  3047. return NULL;
  3048. }
  3049. static inline bool
  3050. should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
  3051. enum compact_result compact_result,
  3052. enum compact_priority *compact_priority,
  3053. int *compaction_retries)
  3054. {
  3055. struct zone *zone;
  3056. struct zoneref *z;
  3057. if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
  3058. return false;
  3059. /*
  3060. * There are setups with compaction disabled which would prefer to loop
  3061. * inside the allocator rather than hit the oom killer prematurely.
  3062. * Let's give them a good hope and keep retrying while the order-0
  3063. * watermarks are OK.
  3064. */
  3065. for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
  3066. ac->nodemask) {
  3067. if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
  3068. ac_classzone_idx(ac), alloc_flags))
  3069. return true;
  3070. }
  3071. return false;
  3072. }
  3073. #endif /* CONFIG_COMPACTION */
  3074. #ifdef CONFIG_LOCKDEP
  3075. struct lockdep_map __fs_reclaim_map =
  3076. STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);
  3077. static bool __need_fs_reclaim(gfp_t gfp_mask)
  3078. {
  3079. gfp_mask = current_gfp_context(gfp_mask);
  3080. /* no reclaim without waiting on it */
  3081. if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
  3082. return false;
  3083. /* this guy won't enter reclaim */
  3084. if ((current->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
  3085. return false;
	/* We're only interested in __GFP_FS allocations for now */
  3087. if (!(gfp_mask & __GFP_FS))
  3088. return false;
  3089. if (gfp_mask & __GFP_NOLOCKDEP)
  3090. return false;
  3091. return true;
  3092. }
  3093. void fs_reclaim_acquire(gfp_t gfp_mask)
  3094. {
  3095. if (__need_fs_reclaim(gfp_mask))
  3096. lock_map_acquire(&__fs_reclaim_map);
  3097. }
  3098. EXPORT_SYMBOL_GPL(fs_reclaim_acquire);
  3099. void fs_reclaim_release(gfp_t gfp_mask)
  3100. {
  3101. if (__need_fs_reclaim(gfp_mask))
  3102. lock_map_release(&__fs_reclaim_map);
  3103. }
  3104. EXPORT_SYMBOL_GPL(fs_reclaim_release);
  3105. #endif
  3106. /* Perform direct synchronous page reclaim */
  3107. static int
  3108. __perform_reclaim(gfp_t gfp_mask, unsigned int order,
  3109. const struct alloc_context *ac)
  3110. {
  3111. struct reclaim_state reclaim_state;
  3112. int progress;
  3113. unsigned int noreclaim_flag;
  3114. cond_resched();
  3115. /* We now go into synchronous reclaim */
  3116. cpuset_memory_pressure_bump();
  3117. noreclaim_flag = memalloc_noreclaim_save();
  3118. fs_reclaim_acquire(gfp_mask);
  3119. reclaim_state.reclaimed_slab = 0;
  3120. current->reclaim_state = &reclaim_state;
  3121. progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
  3122. ac->nodemask);
  3123. current->reclaim_state = NULL;
  3124. fs_reclaim_release(gfp_mask);
  3125. memalloc_noreclaim_restore(noreclaim_flag);
  3126. cond_resched();
  3127. return progress;
  3128. }
  3129. /* The really slow allocator path where we enter direct reclaim */
  3130. static inline struct page *
  3131. __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
  3132. unsigned int alloc_flags, const struct alloc_context *ac,
  3133. unsigned long *did_some_progress)
  3134. {
  3135. struct page *page = NULL;
  3136. bool drained = false;
  3137. *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
  3138. if (unlikely(!(*did_some_progress)))
  3139. return NULL;
  3140. retry:
  3141. page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
	/*
	 * If an allocation failed after direct reclaim, it could be because
	 * pages are pinned on the per-cpu lists or in high alloc reserves.
	 * Shrink them and try again.
	 */
  3147. if (!page && !drained) {
  3148. unreserve_highatomic_pageblock(ac, false);
  3149. drain_all_pages(NULL);
  3150. drained = true;
  3151. goto retry;
  3152. }
  3153. return page;
  3154. }
  3155. static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
  3156. {
  3157. struct zoneref *z;
  3158. struct zone *zone;
  3159. pg_data_t *last_pgdat = NULL;
  3160. for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
  3161. ac->high_zoneidx, ac->nodemask) {
  3162. if (last_pgdat != zone->zone_pgdat)
  3163. wakeup_kswapd(zone, order, ac->high_zoneidx);
  3164. last_pgdat = zone->zone_pgdat;
  3165. }
  3166. }
  3167. static inline unsigned int
  3168. gfp_to_alloc_flags(gfp_t gfp_mask)
  3169. {
  3170. unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
  3171. /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
  3172. BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
  3173. /*
  3174. * The caller may dip into page reserves a bit more if the caller
  3175. * cannot run direct reclaim, or if the caller has realtime scheduling
  3176. * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
  3177. * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
  3178. */
  3179. alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
  3180. if (gfp_mask & __GFP_ATOMIC) {
  3181. /*
  3182. * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
  3183. * if it can't schedule.
  3184. */
  3185. if (!(gfp_mask & __GFP_NOMEMALLOC))
  3186. alloc_flags |= ALLOC_HARDER;
  3187. /*
  3188. * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
  3189. * comment for __cpuset_node_allowed().
  3190. */
  3191. alloc_flags &= ~ALLOC_CPUSET;
  3192. } else if (unlikely(rt_task(current)) && !in_interrupt())
  3193. alloc_flags |= ALLOC_HARDER;
  3194. #ifdef CONFIG_CMA
  3195. if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
  3196. alloc_flags |= ALLOC_CMA;
  3197. #endif
  3198. return alloc_flags;
  3199. }
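/*
 * Worked example (illustrative, an assumption): a GFP_ATOMIC request carries
 * __GFP_HIGH and __GFP_ATOMIC, so the function above starts from
 * ALLOC_WMARK_MIN | ALLOC_CPUSET, adds ALLOC_HIGH for __GFP_HIGH, adds
 * ALLOC_HARDER because __GFP_ATOMIC is set without __GFP_NOMEMALLOC, and then
 * clears ALLOC_CPUSET, ending up with
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER.
 */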
  3200. static bool oom_reserves_allowed(struct task_struct *tsk)
  3201. {
  3202. if (!tsk_is_oom_victim(tsk))
  3203. return false;
  3204. /*
  3205. * !MMU doesn't have oom reaper so give access to memory reserves
  3206. * only to the thread with TIF_MEMDIE set
  3207. */
  3208. if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
  3209. return false;
  3210. return true;
  3211. }
  3212. /*
  3213. * Distinguish requests which really need access to full memory
  3214. * reserves from oom victims which can live with a portion of it
  3215. */
  3216. static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
  3217. {
  3218. if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
  3219. return 0;
  3220. if (gfp_mask & __GFP_MEMALLOC)
  3221. return ALLOC_NO_WATERMARKS;
  3222. if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
  3223. return ALLOC_NO_WATERMARKS;
  3224. if (!in_interrupt()) {
  3225. if (current->flags & PF_MEMALLOC)
  3226. return ALLOC_NO_WATERMARKS;
  3227. else if (oom_reserves_allowed(current))
  3228. return ALLOC_OOM;
  3229. }
  3230. return 0;
  3231. }
  3232. bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
  3233. {
  3234. return !!__gfp_pfmemalloc_flags(gfp_mask);
  3235. }
  3236. /*
  3237. * Checks whether it makes sense to retry the reclaim to make a forward progress
  3238. * for the given allocation request.
  3239. *
  3240. * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
  3241. * without success, or when we couldn't even meet the watermark if we
  3242. * reclaimed all remaining pages on the LRU lists.
  3243. *
  3244. * Returns true if a retry is viable or false to enter the oom path.
  3245. */
  3246. static inline bool
  3247. should_reclaim_retry(gfp_t gfp_mask, unsigned order,
  3248. struct alloc_context *ac, int alloc_flags,
  3249. bool did_some_progress, int *no_progress_loops)
  3250. {
  3251. struct zone *zone;
  3252. struct zoneref *z;
  3253. /*
  3254. * Costly allocations might have made a progress but this doesn't mean
  3255. * their order will become available due to high fragmentation so
  3256. * always increment the no progress counter for them
  3257. */
  3258. if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
  3259. *no_progress_loops = 0;
  3260. else
  3261. (*no_progress_loops)++;
  3262. /*
  3263. * Make sure we converge to OOM if we cannot make any progress
  3264. * several times in the row.
  3265. */
  3266. if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
  3267. /* Before OOM, exhaust highatomic_reserve */
  3268. return unreserve_highatomic_pageblock(ac, true);
  3269. }
  3270. /*
  3271. * Keep reclaiming pages while there is a chance this will lead
  3272. * somewhere. If none of the target zones can satisfy our allocation
  3273. * request even if all reclaimable pages are considered then we are
  3274. * screwed and have to go OOM.
  3275. */
  3276. for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
  3277. ac->nodemask) {
  3278. unsigned long available;
  3279. unsigned long reclaimable;
  3280. unsigned long min_wmark = min_wmark_pages(zone);
  3281. bool wmark;
  3282. available = reclaimable = zone_reclaimable_pages(zone);
  3283. available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
  3284. /*
  3285. * Would the allocation succeed if we reclaimed all
  3286. * reclaimable pages?
  3287. */
  3288. wmark = __zone_watermark_ok(zone, order, min_wmark,
  3289. ac_classzone_idx(ac), alloc_flags, available);
  3290. trace_reclaim_retry_zone(z, order, reclaimable,
  3291. available, min_wmark, *no_progress_loops, wmark);
  3292. if (wmark) {
			/*
			 * If we didn't make any progress and have a lot of
			 * dirty + writeback pages then we should wait for
			 * an IO to complete to slow down the reclaim and
			 * prevent premature OOM
			 */
  3299. if (!did_some_progress) {
  3300. unsigned long write_pending;
  3301. write_pending = zone_page_state_snapshot(zone,
  3302. NR_ZONE_WRITE_PENDING);
  3303. if (2 * write_pending > reclaimable) {
  3304. congestion_wait(BLK_RW_ASYNC, HZ/10);
  3305. return true;
  3306. }
  3307. }
  3308. /*
  3309. * Memory allocation/reclaim might be called from a WQ
  3310. * context and the current implementation of the WQ
  3311. * concurrency control doesn't recognize that
  3312. * a particular WQ is congested if the worker thread is
  3313. * looping without ever sleeping. Therefore we have to
  3314. * do a short sleep here rather than calling
  3315. * cond_resched().
  3316. */
  3317. if (current->flags & PF_WQ_WORKER)
  3318. schedule_timeout_uninterruptible(1);
  3319. else
  3320. cond_resched();
  3321. return true;
  3322. }
  3323. }
  3324. return false;
  3325. }
  3326. static inline bool
  3327. check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
  3328. {
  3329. /*
  3330. * It's possible that cpuset's mems_allowed and the nodemask from
  3331. * mempolicy don't intersect. This should be normally dealt with by
  3332. * policy_nodemask(), but it's possible to race with cpuset update in
  3333. * such a way the check therein was true, and then it became false
  3334. * before we got our cpuset_mems_cookie here.
  3335. * This assumes that for all allocations, ac->nodemask can come only
  3336. * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
  3337. * when it does not intersect with the cpuset restrictions) or the
  3338. * caller can deal with a violated nodemask.
  3339. */
  3340. if (cpusets_enabled() && ac->nodemask &&
  3341. !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
  3342. ac->nodemask = NULL;
  3343. return true;
  3344. }
  3345. /*
  3346. * When updating a task's mems_allowed or mempolicy nodemask, it is
  3347. * possible to race with parallel threads in such a way that our
  3348. * allocation can fail while the mask is being updated. If we are about
  3349. * to fail, check if the cpuset changed during allocation and if so,
  3350. * retry.
  3351. */
  3352. if (read_mems_allowed_retry(cpuset_mems_cookie))
  3353. return true;
  3354. return false;
  3355. }
  3356. static inline struct page *
  3357. __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
  3358. struct alloc_context *ac)
  3359. {
  3360. bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
  3361. const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
  3362. struct page *page = NULL;
  3363. unsigned int alloc_flags;
  3364. unsigned long did_some_progress;
  3365. enum compact_priority compact_priority;
  3366. enum compact_result compact_result;
  3367. int compaction_retries;
  3368. int no_progress_loops;
  3369. unsigned int cpuset_mems_cookie;
  3370. int reserve_flags;
  3371. /*
  3372. * In the slowpath, we sanity check order to avoid ever trying to
  3373. * reclaim >= MAX_ORDER areas which will never succeed. Callers may
  3374. * be using allocators in order of preference for an area that is
  3375. * too large.
  3376. */
  3377. if (order >= MAX_ORDER) {
  3378. WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
  3379. return NULL;
  3380. }
  3381. /*
  3382. * We also sanity check to catch abuse of atomic reserves being used by
  3383. * callers that are not in atomic context.
  3384. */
  3385. if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
  3386. (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
  3387. gfp_mask &= ~__GFP_ATOMIC;
  3388. retry_cpuset:
  3389. compaction_retries = 0;
  3390. no_progress_loops = 0;
  3391. compact_priority = DEF_COMPACT_PRIORITY;
  3392. cpuset_mems_cookie = read_mems_allowed_begin();
  3393. /*
  3394. * The fast path uses conservative alloc_flags to succeed only until
  3395. * kswapd needs to be woken up, and to avoid the cost of setting up
  3396. * alloc_flags precisely. So we do that now.
  3397. */
  3398. alloc_flags = gfp_to_alloc_flags(gfp_mask);
  3399. /*
  3400. * We need to recalculate the starting point for the zonelist iterator
  3401. * because we might have used different nodemask in the fast path, or
  3402. * there was a cpuset modification and we are retrying - otherwise we
  3403. * could end up iterating over non-eligible zones endlessly.
  3404. */
  3405. ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
  3406. ac->high_zoneidx, ac->nodemask);
  3407. if (!ac->preferred_zoneref->zone)
  3408. goto nopage;
  3409. if (gfp_mask & __GFP_KSWAPD_RECLAIM)
  3410. wake_all_kswapds(order, ac);
  3411. /*
  3412. * The adjusted alloc_flags might result in immediate success, so try
  3413. * that first
  3414. */
  3415. page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
  3416. if (page)
  3417. goto got_pg;
	/*
	 * For costly allocations, try direct compaction first, as it's likely
	 * that we have enough base pages and don't need to reclaim. For non-
	 * movable high-order allocations, do that as well, as compaction will
	 * try to prevent permanent fragmentation by migrating from blocks of
	 * the same migratetype.
	 * Don't try this for allocations that are allowed to ignore
	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
	 */
  3427. if (can_direct_reclaim &&
  3428. (costly_order ||
  3429. (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
  3430. && !gfp_pfmemalloc_allowed(gfp_mask)) {
  3431. page = __alloc_pages_direct_compact(gfp_mask, order,
  3432. alloc_flags, ac,
  3433. INIT_COMPACT_PRIORITY,
  3434. &compact_result);
  3435. if (page)
  3436. goto got_pg;
  3437. /*
  3438. * Checks for costly allocations with __GFP_NORETRY, which
  3439. * includes THP page fault allocations
  3440. */
  3441. if (costly_order && (gfp_mask & __GFP_NORETRY)) {
  3442. /*
  3443. * If compaction is deferred for high-order allocations,
  3444. * it is because sync compaction recently failed. If
  3445. * this is the case and the caller requested a THP
  3446. * allocation, we do not want to heavily disrupt the
  3447. * system, so we fail the allocation instead of entering
  3448. * direct reclaim.
  3449. */
  3450. if (compact_result == COMPACT_DEFERRED)
  3451. goto nopage;
  3452. /*
  3453. * Looks like reclaim/compaction is worth trying, but
  3454. * sync compaction could be very expensive, so keep
  3455. * using async compaction.
  3456. */
  3457. compact_priority = INIT_COMPACT_PRIORITY;
  3458. }
  3459. }
  3460. retry:
  3461. /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
  3462. if (gfp_mask & __GFP_KSWAPD_RECLAIM)
  3463. wake_all_kswapds(order, ac);
  3464. reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
  3465. if (reserve_flags)
  3466. alloc_flags = reserve_flags;
  3467. /*
  3468. * Reset the zonelist iterators if memory policies can be ignored.
  3469. * These allocations are high priority and system rather than user
  3470. * orientated.
  3471. */
  3472. if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
  3473. ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
  3474. ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
  3475. ac->high_zoneidx, ac->nodemask);
  3476. }
  3477. /* Attempt with potentially adjusted zonelist and alloc_flags */
  3478. page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
  3479. if (page)
  3480. goto got_pg;
  3481. /* Caller is not willing to reclaim, we can't balance anything */
  3482. if (!can_direct_reclaim)
  3483. goto nopage;
  3484. /* Avoid recursion of direct reclaim */
  3485. if (current->flags & PF_MEMALLOC)
  3486. goto nopage;
  3487. /* Try direct reclaim and then allocating */
  3488. page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
  3489. &did_some_progress);
  3490. if (page)
  3491. goto got_pg;
  3492. /* Try direct compaction and then allocating */
  3493. page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
  3494. compact_priority, &compact_result);
  3495. if (page)
  3496. goto got_pg;
  3497. /* Do not loop if specifically requested */
  3498. if (gfp_mask & __GFP_NORETRY)
  3499. goto nopage;
  3500. /*
  3501. * Do not retry costly high order allocations unless they are
  3502. * __GFP_RETRY_MAYFAIL
  3503. */
  3504. if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
  3505. goto nopage;
  3506. if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
  3507. did_some_progress > 0, &no_progress_loops))
  3508. goto retry;
  3509. /*
  3510. * It doesn't make any sense to retry for the compaction if the order-0
  3511. * reclaim is not able to make any progress because the current
  3512. * implementation of the compaction depends on the sufficient amount
  3513. * of free memory (see __compaction_suitable)
  3514. */
  3515. if (did_some_progress > 0 &&
  3516. should_compact_retry(ac, order, alloc_flags,
  3517. compact_result, &compact_priority,
  3518. &compaction_retries))
  3519. goto retry;
  3520. /* Deal with possible cpuset update races before we start OOM killing */
  3521. if (check_retry_cpuset(cpuset_mems_cookie, ac))
  3522. goto retry_cpuset;
  3523. /* Reclaim has failed us, start killing things */
  3524. page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
  3525. if (page)
  3526. goto got_pg;
  3527. /* Avoid allocations with no watermarks from looping endlessly */
  3528. if (tsk_is_oom_victim(current) &&
  3529. (alloc_flags == ALLOC_OOM ||
  3530. (gfp_mask & __GFP_NOMEMALLOC)))
  3531. goto nopage;
  3532. /* Retry as long as the OOM killer is making progress */
  3533. if (did_some_progress) {
  3534. no_progress_loops = 0;
  3535. goto retry;
  3536. }
  3537. nopage:
  3538. /* Deal with possible cpuset update races before we fail */
  3539. if (check_retry_cpuset(cpuset_mems_cookie, ac))
  3540. goto retry_cpuset;
  3541. /*
  3542. * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
  3543. * we always retry
  3544. */
  3545. if (gfp_mask & __GFP_NOFAIL) {
  3546. /*
  3547. * All existing users of the __GFP_NOFAIL are blockable, so warn
  3548. * of any new users that actually require GFP_NOWAIT
  3549. */
  3550. if (WARN_ON_ONCE(!can_direct_reclaim))
  3551. goto fail;
		/*
		 * PF_MEMALLOC request from this context is rather bizarre
		 * because we cannot reclaim anything and only can loop waiting
		 * for somebody to do the work for us.
		 */
  3557. WARN_ON_ONCE(current->flags & PF_MEMALLOC);
		/*
		 * Non-failing costly orders are a hard requirement which we
		 * are not really prepared for, so warn about these users so
		 * that we can identify them and convert them to something
		 * else.
		 */
  3564. WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);
  3565. /*
  3566. * Help non-failing allocations by giving them access to memory
  3567. * reserves but do not use ALLOC_NO_WATERMARKS because this
  3568. * could deplete whole memory reserves which would just make
  3569. * the situation worse
  3570. */
  3571. page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
  3572. if (page)
  3573. goto got_pg;
  3574. cond_resched();
  3575. goto retry;
  3576. }
  3577. fail:
  3578. warn_alloc(gfp_mask, ac->nodemask,
  3579. "page allocation failure: order:%u", order);
  3580. got_pg:
  3581. return page;
  3582. }
  3583. static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
  3584. int preferred_nid, nodemask_t *nodemask,
  3585. struct alloc_context *ac, gfp_t *alloc_mask,
  3586. unsigned int *alloc_flags)
  3587. {
  3588. ac->high_zoneidx = gfp_zone(gfp_mask);
  3589. ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
  3590. ac->nodemask = nodemask;
  3591. ac->migratetype = gfpflags_to_migratetype(gfp_mask);
  3592. if (cpusets_enabled()) {
  3593. *alloc_mask |= __GFP_HARDWALL;
  3594. if (!ac->nodemask)
  3595. ac->nodemask = &cpuset_current_mems_allowed;
  3596. else
  3597. *alloc_flags |= ALLOC_CPUSET;
  3598. }
  3599. fs_reclaim_acquire(gfp_mask);
  3600. fs_reclaim_release(gfp_mask);
  3601. might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
  3602. if (should_fail_alloc_page(gfp_mask, order))
  3603. return false;
  3604. if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
  3605. *alloc_flags |= ALLOC_CMA;
  3606. return true;
  3607. }
/* Determine whether to spread dirty pages and what the first usable zone is */
  3609. static inline void finalise_ac(gfp_t gfp_mask,
  3610. unsigned int order, struct alloc_context *ac)
  3611. {
  3612. /* Dirty zone balancing only done in the fast path */
  3613. ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);
  3614. /*
  3615. * The preferred zone is used for statistics but crucially it is
  3616. * also used as the starting point for the zonelist iterator. It
  3617. * may get reset for allocations that ignore memory policies.
  3618. */
  3619. ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
  3620. ac->high_zoneidx, ac->nodemask);
  3621. }
  3622. /*
  3623. * This is the 'heart' of the zoned buddy allocator.
  3624. */
  3625. struct page *
  3626. __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
  3627. nodemask_t *nodemask)
  3628. {
  3629. struct page *page;
  3630. unsigned int alloc_flags = ALLOC_WMARK_LOW;
  3631. gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
  3632. struct alloc_context ac = { };
  3633. gfp_mask &= gfp_allowed_mask;
  3634. alloc_mask = gfp_mask;
  3635. if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
  3636. return NULL;
  3637. finalise_ac(gfp_mask, order, &ac);
  3638. /* First allocation attempt */
  3639. page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
  3640. if (likely(page))
  3641. goto out;
  3642. /*
  3643. * Apply scoped allocation constraints. This is mainly about GFP_NOFS
  3644. * resp. GFP_NOIO which has to be inherited for all allocation requests
  3645. * from a particular context which has been marked by
  3646. * memalloc_no{fs,io}_{save,restore}.
  3647. */
  3648. alloc_mask = current_gfp_context(gfp_mask);
  3649. ac.spread_dirty_pages = false;
  3650. /*
  3651. * Restore the original nodemask if it was potentially replaced with
  3652. * &cpuset_current_mems_allowed to optimize the fast-path attempt.
  3653. */
  3654. if (unlikely(ac.nodemask != nodemask))
  3655. ac.nodemask = nodemask;
  3656. page = __alloc_pages_slowpath(alloc_mask, order, &ac);
  3657. out:
  3658. if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
  3659. unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
  3660. __free_pages(page, order);
  3661. page = NULL;
  3662. }
  3663. trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
  3664. return page;
  3665. }
  3666. EXPORT_SYMBOL(__alloc_pages_nodemask);
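/*
 * Illustrative sketch (an assumption, not code from this file): most callers
 * reach __alloc_pages_nodemask() through the alloc_pages()/alloc_pages_node()
 * wrappers, roughly:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
 *
 *	if (page) {
 *		void *addr = page_address(page);
 *		...
 *		__free_pages(page, 0);
 *	}
 */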
  3667. /*
  3668. * Common helper functions.
  3669. */
  3670. unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
  3671. {
  3672. struct page *page;
  3673. /*
3674. * __get_free_pages() returns a directly mapped kernel address, which
3675. * cannot represent a highmem page
  3676. */
  3677. VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
  3678. page = alloc_pages(gfp_mask, order);
  3679. if (!page)
  3680. return 0;
  3681. return (unsigned long) page_address(page);
  3682. }
  3683. EXPORT_SYMBOL(__get_free_pages);
  3684. unsigned long get_zeroed_page(gfp_t gfp_mask)
  3685. {
  3686. return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
  3687. }
  3688. EXPORT_SYMBOL(get_zeroed_page);
  3689. void __free_pages(struct page *page, unsigned int order)
  3690. {
  3691. if (put_page_testzero(page)) {
  3692. if (order == 0)
  3693. free_unref_page(page);
  3694. else
  3695. __free_pages_ok(page, order);
  3696. }
  3697. }
  3698. EXPORT_SYMBOL(__free_pages);
  3699. void free_pages(unsigned long addr, unsigned int order)
  3700. {
  3701. if (addr != 0) {
  3702. VM_BUG_ON(!virt_addr_valid((void *)addr));
  3703. __free_pages(virt_to_page((void *)addr), order);
  3704. }
  3705. }
  3706. EXPORT_SYMBOL(free_pages);
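/*
 * Illustrative usage of the helpers above (editorial sketch, not part
 * of the original file): a caller needing four physically contiguous,
 * directly mapped pages might do
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 2);
 *
 *	if (addr)
 *		free_pages(addr, 2);
 *
 * The order passed to free_pages() must match the order used for the
 * allocation, and __GFP_HIGHMEM must not be set (see VM_BUG_ON above).
 */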
  3707. /*
  3708. * Page Fragment:
  3709. * An arbitrary-length arbitrary-offset area of memory which resides
  3710. * within a 0 or higher order page. Multiple fragments within that page
  3711. * are individually refcounted, in the page's reference counter.
  3712. *
  3713. * The page_frag functions below provide a simple allocation framework for
  3714. * page fragments. This is used by the network stack and network device
  3715. * drivers to provide a backing region of memory for use as either an
  3716. * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
  3717. */
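/*
 * Refill the fragment cache: prefer a PAGE_FRAG_CACHE_MAX_ORDER
 * compound page (when that is larger than a single page) and fall
 * back to an order-0 page if the larger allocation fails.
 */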
  3718. static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
  3719. gfp_t gfp_mask)
  3720. {
  3721. struct page *page = NULL;
  3722. gfp_t gfp = gfp_mask;
  3723. #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
  3724. gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
  3725. __GFP_NOMEMALLOC;
  3726. page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
  3727. PAGE_FRAG_CACHE_MAX_ORDER);
  3728. nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
  3729. #endif
  3730. if (unlikely(!page))
  3731. page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
  3732. nc->va = page ? page_address(page) : NULL;
  3733. return page;
  3734. }
  3735. void __page_frag_cache_drain(struct page *page, unsigned int count)
  3736. {
  3737. VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
  3738. if (page_ref_sub_and_test(page, count)) {
  3739. unsigned int order = compound_order(page);
  3740. if (order == 0)
  3741. free_unref_page(page);
  3742. else
  3743. __free_pages_ok(page, order);
  3744. }
  3745. }
  3746. EXPORT_SYMBOL(__page_frag_cache_drain);
  3747. void *page_frag_alloc(struct page_frag_cache *nc,
  3748. unsigned int fragsz, gfp_t gfp_mask)
  3749. {
  3750. unsigned int size = PAGE_SIZE;
  3751. struct page *page;
  3752. int offset;
  3753. if (unlikely(!nc->va)) {
  3754. refill:
  3755. page = __page_frag_cache_refill(nc, gfp_mask);
  3756. if (!page)
  3757. return NULL;
  3758. #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
  3759. /* if size can vary use size else just use PAGE_SIZE */
  3760. size = nc->size;
  3761. #endif
  3762. /* Even if we own the page, we do not use atomic_set().
  3763. * This would break get_page_unless_zero() users.
  3764. */
  3765. page_ref_add(page, size - 1);
  3766. /* reset page count bias and offset to start of new frag */
  3767. nc->pfmemalloc = page_is_pfmemalloc(page);
  3768. nc->pagecnt_bias = size;
  3769. nc->offset = size;
  3770. }
  3771. offset = nc->offset - fragsz;
  3772. if (unlikely(offset < 0)) {
  3773. page = virt_to_page(nc->va);
  3774. if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
  3775. goto refill;
  3776. #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
  3777. /* if size can vary use size else just use PAGE_SIZE */
  3778. size = nc->size;
  3779. #endif
  3780. /* OK, page count is 0, we can safely set it */
  3781. set_page_count(page, size);
  3782. /* reset page count bias and offset to start of new frag */
  3783. nc->pagecnt_bias = size;
  3784. offset = size - fragsz;
  3785. }
  3786. nc->pagecnt_bias--;
  3787. nc->offset = offset;
  3788. return nc->va + offset;
  3789. }
  3790. EXPORT_SYMBOL(page_frag_alloc);
  3791. /*
  3792. * Frees a page fragment allocated out of either a compound or order 0 page.
  3793. */
  3794. void page_frag_free(void *addr)
  3795. {
  3796. struct page *page = virt_to_head_page(addr);
  3797. if (unlikely(put_page_testzero(page)))
  3798. __free_pages_ok(page, compound_order(page));
  3799. }
  3800. EXPORT_SYMBOL(page_frag_free);
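/*
 * Illustrative usage of the page_frag API (editorial sketch, not part
 * of the original file): a zero-initialised struct page_frag_cache is
 * refilled on demand and small buffers are carved out of it, e.g.
 *
 *	static struct page_frag_cache frag_cache;
 *
 *	void *buf = page_frag_alloc(&frag_cache, 256, GFP_ATOMIC);
 *	if (buf)
 *		page_frag_free(buf);
 *
 * Each fragment holds a reference on the backing page, so fragments
 * may be freed in any order, long after the cache has moved on to a
 * new page. fragsz must fit within a single page.
 */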
  3801. static void *make_alloc_exact(unsigned long addr, unsigned int order,
  3802. size_t size)
  3803. {
  3804. if (addr) {
  3805. unsigned long alloc_end = addr + (PAGE_SIZE << order);
  3806. unsigned long used = addr + PAGE_ALIGN(size);
  3807. split_page(virt_to_page((void *)addr), order);
  3808. while (used < alloc_end) {
  3809. free_page(used);
  3810. used += PAGE_SIZE;
  3811. }
  3812. }
  3813. return (void *)addr;
  3814. }
  3815. /**
3816. * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
  3817. * @size: the number of bytes to allocate
  3818. * @gfp_mask: GFP flags for the allocation
  3819. *
  3820. * This function is similar to alloc_pages(), except that it allocates the
  3821. * minimum number of pages to satisfy the request. alloc_pages() can only
  3822. * allocate memory in power-of-two pages.
  3823. *
  3824. * This function is also limited by MAX_ORDER.
  3825. *
  3826. * Memory allocated by this function must be released by free_pages_exact().
  3827. */
  3828. void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
  3829. {
  3830. unsigned int order = get_order(size);
  3831. unsigned long addr;
  3832. addr = __get_free_pages(gfp_mask, order);
  3833. return make_alloc_exact(addr, order, size);
  3834. }
  3835. EXPORT_SYMBOL(alloc_pages_exact);
  3836. /**
  3837. * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
  3838. * pages on a node.
  3839. * @nid: the preferred node ID where memory should be allocated
  3840. * @size: the number of bytes to allocate
  3841. * @gfp_mask: GFP flags for the allocation
  3842. *
  3843. * Like alloc_pages_exact(), but try to allocate on node nid first before falling
  3844. * back.
  3845. */
  3846. void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
  3847. {
  3848. unsigned int order = get_order(size);
  3849. struct page *p = alloc_pages_node(nid, gfp_mask, order);
  3850. if (!p)
  3851. return NULL;
  3852. return make_alloc_exact((unsigned long)page_address(p), order, size);
  3853. }
  3854. /**
  3855. * free_pages_exact - release memory allocated via alloc_pages_exact()
  3856. * @virt: the value returned by alloc_pages_exact.
  3857. * @size: size of allocation, same value as passed to alloc_pages_exact().
  3858. *
  3859. * Release the memory allocated by a previous call to alloc_pages_exact.
  3860. */
  3861. void free_pages_exact(void *virt, size_t size)
  3862. {
  3863. unsigned long addr = (unsigned long)virt;
  3864. unsigned long end = addr + PAGE_ALIGN(size);
  3865. while (addr < end) {
  3866. free_page(addr);
  3867. addr += PAGE_SIZE;
  3868. }
  3869. }
  3870. EXPORT_SYMBOL(free_pages_exact);
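/*
 * Illustrative usage (editorial sketch, not part of the original
 * file; sizes assume 4KiB pages):
 *
 *	void *buf = alloc_pages_exact(48 * 1024, GFP_KERNEL);
 *
 *	if (buf)
 *		free_pages_exact(buf, 48 * 1024);
 *
 * A plain alloc_pages() call would round 48KiB up to a 64KiB order-4
 * block; alloc_pages_exact() splits that block and returns the
 * trailing 16KiB to the page allocator immediately.
 */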
  3871. /**
  3872. * nr_free_zone_pages - count number of pages beyond high watermark
  3873. * @offset: The zone index of the highest zone
  3874. *
3875. * nr_free_zone_pages() counts the number of pages which are beyond the
  3876. * high watermark within all zones at or below a given zone index. For each
  3877. * zone, the number of pages is calculated as:
  3878. *
  3879. * nr_free_zone_pages = managed_pages - high_pages
  3880. */
  3881. static unsigned long nr_free_zone_pages(int offset)
  3882. {
  3883. struct zoneref *z;
  3884. struct zone *zone;
  3885. /* Just pick one node, since fallback list is circular */
  3886. unsigned long sum = 0;
  3887. struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
  3888. for_each_zone_zonelist(zone, z, zonelist, offset) {
  3889. unsigned long size = zone->managed_pages;
  3890. unsigned long high = high_wmark_pages(zone);
  3891. if (size > high)
  3892. sum += size - high;
  3893. }
  3894. return sum;
  3895. }
  3896. /**
  3897. * nr_free_buffer_pages - count number of pages beyond high watermark
  3898. *
  3899. * nr_free_buffer_pages() counts the number of pages which are beyond the high
  3900. * watermark within ZONE_DMA and ZONE_NORMAL.
  3901. */
  3902. unsigned long nr_free_buffer_pages(void)
  3903. {
  3904. return nr_free_zone_pages(gfp_zone(GFP_USER));
  3905. }
  3906. EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
  3907. /**
  3908. * nr_free_pagecache_pages - count number of pages beyond high watermark
  3909. *
  3910. * nr_free_pagecache_pages() counts the number of pages which are beyond the
  3911. * high watermark within all zones.
  3912. */
  3913. unsigned long nr_free_pagecache_pages(void)
  3914. {
  3915. return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
  3916. }
  3917. static inline void show_node(struct zone *zone)
  3918. {
  3919. if (IS_ENABLED(CONFIG_NUMA))
  3920. printk("Node %d ", zone_to_nid(zone));
  3921. }
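/*
 * Estimate how much memory a new workload could allocate before the
 * system starts swapping: free pages above the reserves, plus the
 * share of page cache and reclaimable slab assumed to be freeable
 * (at least half of each, capped by the low watermark). This is the
 * basis of MemAvailable in /proc/meminfo.
 */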
  3922. long si_mem_available(void)
  3923. {
  3924. long available;
  3925. unsigned long pagecache;
  3926. unsigned long wmark_low = 0;
  3927. unsigned long pages[NR_LRU_LISTS];
  3928. struct zone *zone;
  3929. int lru;
  3930. for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
  3931. pages[lru] = global_node_page_state(NR_LRU_BASE + lru);
  3932. for_each_zone(zone)
  3933. wmark_low += zone->watermark[WMARK_LOW];
  3934. /*
  3935. * Estimate the amount of memory available for userspace allocations,
  3936. * without causing swapping.
  3937. */
  3938. available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;
  3939. /*
  3940. * Not all the page cache can be freed, otherwise the system will
  3941. * start swapping. Assume at least half of the page cache, or the
  3942. * low watermark worth of cache, needs to stay.
  3943. */
  3944. pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
  3945. pagecache -= min(pagecache / 2, wmark_low);
  3946. available += pagecache;
  3947. /*
  3948. * Part of the reclaimable slab consists of items that are in use,
  3949. * and cannot be freed. Cap this estimate at the low watermark.
  3950. */
  3951. available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
  3952. min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
  3953. wmark_low);
  3954. if (available < 0)
  3955. available = 0;
  3956. return available;
  3957. }
  3958. EXPORT_SYMBOL_GPL(si_mem_available);
  3959. void si_meminfo(struct sysinfo *val)
  3960. {
  3961. val->totalram = totalram_pages;
  3962. val->sharedram = global_node_page_state(NR_SHMEM);
  3963. val->freeram = global_zone_page_state(NR_FREE_PAGES);
  3964. val->bufferram = nr_blockdev_pages();
  3965. val->totalhigh = totalhigh_pages;
  3966. val->freehigh = nr_free_highpages();
  3967. val->mem_unit = PAGE_SIZE;
  3968. }
  3969. EXPORT_SYMBOL(si_meminfo);
  3970. #ifdef CONFIG_NUMA
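/* NUMA variant of si_meminfo(): fill @val with totals for node @nid only */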
  3971. void si_meminfo_node(struct sysinfo *val, int nid)
  3972. {
  3973. int zone_type; /* needs to be signed */
  3974. unsigned long managed_pages = 0;
  3975. unsigned long managed_highpages = 0;
  3976. unsigned long free_highpages = 0;
  3977. pg_data_t *pgdat = NODE_DATA(nid);
  3978. for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
  3979. managed_pages += pgdat->node_zones[zone_type].managed_pages;
  3980. val->totalram = managed_pages;
  3981. val->sharedram = node_page_state(pgdat, NR_SHMEM);
  3982. val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
  3983. #ifdef CONFIG_HIGHMEM
  3984. for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
  3985. struct zone *zone = &pgdat->node_zones[zone_type];
  3986. if (is_highmem(zone)) {
  3987. managed_highpages += zone->managed_pages;
  3988. free_highpages += zone_page_state(zone, NR_FREE_PAGES);
  3989. }
  3990. }
  3991. val->totalhigh = managed_highpages;
  3992. val->freehigh = free_highpages;
  3993. #else
  3994. val->totalhigh = managed_highpages;
  3995. val->freehigh = free_highpages;
  3996. #endif
  3997. val->mem_unit = PAGE_SIZE;
  3998. }
  3999. #endif
  4000. /*
  4001. * Determine whether the node should be displayed or not, depending on whether
  4002. * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
  4003. */
  4004. static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
  4005. {
  4006. if (!(flags & SHOW_MEM_FILTER_NODES))
  4007. return false;
  4008. /*
  4009. * no node mask - aka implicit memory numa policy. Do not bother with
  4010. * the synchronization - read_mems_allowed_begin - because we do not
  4011. * have to be precise here.
  4012. */
  4013. if (!nodemask)
  4014. nodemask = &cpuset_current_mems_allowed;
  4015. return !node_isset(nid, *nodemask);
  4016. }
  4017. #define K(x) ((x) << (PAGE_SHIFT-10))
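/*
 * Print one letter for each migratetype present in @type, a bitmask
 * assembled per order by show_free_areas() below.
 */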
  4018. static void show_migration_types(unsigned char type)
  4019. {
  4020. static const char types[MIGRATE_TYPES] = {
  4021. [MIGRATE_UNMOVABLE] = 'U',
  4022. [MIGRATE_MOVABLE] = 'M',
  4023. [MIGRATE_RECLAIMABLE] = 'E',
  4024. [MIGRATE_HIGHATOMIC] = 'H',
  4025. #ifdef CONFIG_CMA
  4026. [MIGRATE_CMA] = 'C',
  4027. #endif
  4028. #ifdef CONFIG_MEMORY_ISOLATION
  4029. [MIGRATE_ISOLATE] = 'I',
  4030. #endif
  4031. };
  4032. char tmp[MIGRATE_TYPES + 1];
  4033. char *p = tmp;
  4034. int i;
  4035. for (i = 0; i < MIGRATE_TYPES; i++) {
  4036. if (type & (1 << i))
  4037. *p++ = types[i];
  4038. }
  4039. *p = '\0';
  4040. printk(KERN_CONT "(%s) ", tmp);
  4041. }
  4042. /*
  4043. * Show free area list (used inside shift_scroll-lock stuff)
  4044. * We also calculate the percentage fragmentation. We do this by counting the
  4045. * memory on each free list with the exception of the first item on the list.
  4046. *
  4047. * Bits in @filter:
  4048. * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
  4049. * cpuset.
  4050. */
  4051. void show_free_areas(unsigned int filter, nodemask_t *nodemask)
  4052. {
  4053. unsigned long free_pcp = 0;
  4054. int cpu;
  4055. struct zone *zone;
  4056. pg_data_t *pgdat;
  4057. for_each_populated_zone(zone) {
  4058. if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
  4059. continue;
  4060. for_each_online_cpu(cpu)
  4061. free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
  4062. }
  4063. printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
  4064. " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
  4065. " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
  4066. " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
  4067. " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
  4068. " free:%lu free_pcp:%lu free_cma:%lu\n",
  4069. global_node_page_state(NR_ACTIVE_ANON),
  4070. global_node_page_state(NR_INACTIVE_ANON),
  4071. global_node_page_state(NR_ISOLATED_ANON),
  4072. global_node_page_state(NR_ACTIVE_FILE),
  4073. global_node_page_state(NR_INACTIVE_FILE),
  4074. global_node_page_state(NR_ISOLATED_FILE),
  4075. global_node_page_state(NR_UNEVICTABLE),
  4076. global_node_page_state(NR_FILE_DIRTY),
  4077. global_node_page_state(NR_WRITEBACK),
  4078. global_node_page_state(NR_UNSTABLE_NFS),
  4079. global_node_page_state(NR_SLAB_RECLAIMABLE),
  4080. global_node_page_state(NR_SLAB_UNRECLAIMABLE),
  4081. global_node_page_state(NR_FILE_MAPPED),
  4082. global_node_page_state(NR_SHMEM),
  4083. global_zone_page_state(NR_PAGETABLE),
  4084. global_zone_page_state(NR_BOUNCE),
  4085. global_zone_page_state(NR_FREE_PAGES),
  4086. free_pcp,
  4087. global_zone_page_state(NR_FREE_CMA_PAGES));
  4088. for_each_online_pgdat(pgdat) {
  4089. if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
  4090. continue;
  4091. printk("Node %d"
  4092. " active_anon:%lukB"
  4093. " inactive_anon:%lukB"
  4094. " active_file:%lukB"
  4095. " inactive_file:%lukB"
  4096. " unevictable:%lukB"
  4097. " isolated(anon):%lukB"
  4098. " isolated(file):%lukB"
  4099. " mapped:%lukB"
  4100. " dirty:%lukB"
  4101. " writeback:%lukB"
  4102. " shmem:%lukB"
  4103. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  4104. " shmem_thp: %lukB"
  4105. " shmem_pmdmapped: %lukB"
  4106. " anon_thp: %lukB"
  4107. #endif
  4108. " writeback_tmp:%lukB"
  4109. " unstable:%lukB"
  4110. " all_unreclaimable? %s"
  4111. "\n",
  4112. pgdat->node_id,
  4113. K(node_page_state(pgdat, NR_ACTIVE_ANON)),
  4114. K(node_page_state(pgdat, NR_INACTIVE_ANON)),
  4115. K(node_page_state(pgdat, NR_ACTIVE_FILE)),
  4116. K(node_page_state(pgdat, NR_INACTIVE_FILE)),
  4117. K(node_page_state(pgdat, NR_UNEVICTABLE)),
  4118. K(node_page_state(pgdat, NR_ISOLATED_ANON)),
  4119. K(node_page_state(pgdat, NR_ISOLATED_FILE)),
  4120. K(node_page_state(pgdat, NR_FILE_MAPPED)),
  4121. K(node_page_state(pgdat, NR_FILE_DIRTY)),
  4122. K(node_page_state(pgdat, NR_WRITEBACK)),
  4123. K(node_page_state(pgdat, NR_SHMEM)),
  4124. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  4125. K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
  4126. K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
  4127. * HPAGE_PMD_NR),
  4128. K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
  4129. #endif
  4130. K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
  4131. K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
  4132. pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
  4133. "yes" : "no");
  4134. }
  4135. for_each_populated_zone(zone) {
  4136. int i;
  4137. if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
  4138. continue;
  4139. free_pcp = 0;
  4140. for_each_online_cpu(cpu)
  4141. free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
  4142. show_node(zone);
  4143. printk(KERN_CONT
  4144. "%s"
  4145. " free:%lukB"
  4146. " min:%lukB"
  4147. " low:%lukB"
  4148. " high:%lukB"
  4149. " active_anon:%lukB"
  4150. " inactive_anon:%lukB"
  4151. " active_file:%lukB"
  4152. " inactive_file:%lukB"
  4153. " unevictable:%lukB"
  4154. " writepending:%lukB"
  4155. " present:%lukB"
  4156. " managed:%lukB"
  4157. " mlocked:%lukB"
  4158. " kernel_stack:%lukB"
  4159. " pagetables:%lukB"
  4160. " bounce:%lukB"
  4161. " free_pcp:%lukB"
  4162. " local_pcp:%ukB"
  4163. " free_cma:%lukB"
  4164. "\n",
  4165. zone->name,
  4166. K(zone_page_state(zone, NR_FREE_PAGES)),
  4167. K(min_wmark_pages(zone)),
  4168. K(low_wmark_pages(zone)),
  4169. K(high_wmark_pages(zone)),
  4170. K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
  4171. K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
  4172. K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
  4173. K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
  4174. K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
  4175. K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
  4176. K(zone->present_pages),
  4177. K(zone->managed_pages),
  4178. K(zone_page_state(zone, NR_MLOCK)),
  4179. zone_page_state(zone, NR_KERNEL_STACK_KB),
  4180. K(zone_page_state(zone, NR_PAGETABLE)),
  4181. K(zone_page_state(zone, NR_BOUNCE)),
  4182. K(free_pcp),
  4183. K(this_cpu_read(zone->pageset->pcp.count)),
  4184. K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
  4185. printk("lowmem_reserve[]:");
  4186. for (i = 0; i < MAX_NR_ZONES; i++)
  4187. printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
  4188. printk(KERN_CONT "\n");
  4189. }
  4190. for_each_populated_zone(zone) {
  4191. unsigned int order;
  4192. unsigned long nr[MAX_ORDER], flags, total = 0;
  4193. unsigned char types[MAX_ORDER];
  4194. if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
  4195. continue;
  4196. show_node(zone);
  4197. printk(KERN_CONT "%s: ", zone->name);
  4198. spin_lock_irqsave(&zone->lock, flags);
  4199. for (order = 0; order < MAX_ORDER; order++) {
  4200. struct free_area *area = &zone->free_area[order];
  4201. int type;
  4202. nr[order] = area->nr_free;
  4203. total += nr[order] << order;
  4204. types[order] = 0;
  4205. for (type = 0; type < MIGRATE_TYPES; type++) {
  4206. if (!list_empty(&area->free_list[type]))
  4207. types[order] |= 1 << type;
  4208. }
  4209. }
  4210. spin_unlock_irqrestore(&zone->lock, flags);
  4211. for (order = 0; order < MAX_ORDER; order++) {
  4212. printk(KERN_CONT "%lu*%lukB ",
  4213. nr[order], K(1UL) << order);
  4214. if (nr[order])
  4215. show_migration_types(types[order]);
  4216. }
  4217. printk(KERN_CONT "= %lukB\n", K(total));
  4218. }
  4219. hugetlb_show_meminfo();
  4220. printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));
  4221. show_swap_cache_info();
  4222. }
  4223. static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
  4224. {
  4225. zoneref->zone = zone;
  4226. zoneref->zone_idx = zone_idx(zone);
  4227. }
  4228. /*
  4229. * Builds allocation fallback zone lists.
  4230. *
  4231. * Add all populated zones of a node to the zonelist.
  4232. */
  4233. static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
  4234. {
  4235. struct zone *zone;
  4236. enum zone_type zone_type = MAX_NR_ZONES;
  4237. int nr_zones = 0;
  4238. do {
  4239. zone_type--;
  4240. zone = pgdat->node_zones + zone_type;
  4241. if (managed_zone(zone)) {
  4242. zoneref_set_zone(zone, &zonerefs[nr_zones++]);
  4243. check_highest_zone(zone_type);
  4244. }
  4245. } while (zone_type);
  4246. return nr_zones;
  4247. }
  4248. #ifdef CONFIG_NUMA
  4249. static int __parse_numa_zonelist_order(char *s)
  4250. {
  4251. /*
4252. * We used to support different zonelist ordering modes but they turned
4253. * out to be just not useful. Let's keep the warning in place
4254. * if somebody still uses the cmd line parameter so that we do
4255. * not fail it silently.
  4256. */
  4257. if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
  4258. pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
  4259. return -EINVAL;
  4260. }
  4261. return 0;
  4262. }
  4263. static __init int setup_numa_zonelist_order(char *s)
  4264. {
  4265. if (!s)
  4266. return 0;
  4267. return __parse_numa_zonelist_order(s);
  4268. }
  4269. early_param("numa_zonelist_order", setup_numa_zonelist_order);
  4270. char numa_zonelist_order[] = "Node";
  4271. /*
  4272. * sysctl handler for numa_zonelist_order
  4273. */
  4274. int numa_zonelist_order_handler(struct ctl_table *table, int write,
  4275. void __user *buffer, size_t *length,
  4276. loff_t *ppos)
  4277. {
  4278. char *str;
  4279. int ret;
  4280. if (!write)
  4281. return proc_dostring(table, write, buffer, length, ppos);
  4282. str = memdup_user_nul(buffer, 16);
  4283. if (IS_ERR(str))
  4284. return PTR_ERR(str);
  4285. ret = __parse_numa_zonelist_order(str);
  4286. kfree(str);
  4287. return ret;
  4288. }
  4289. #define MAX_NODE_LOAD (nr_online_nodes)
  4290. static int node_load[MAX_NUMNODES];
  4291. /**
  4292. * find_next_best_node - find the next node that should appear in a given node's fallback list
  4293. * @node: node whose fallback list we're appending
  4294. * @used_node_mask: nodemask_t of already used nodes
  4295. *
  4296. * We use a number of factors to determine which is the next node that should
  4297. * appear on a given node's fallback list. The node should not have appeared
  4298. * already in @node's fallback list, and it should be the next closest node
  4299. * according to the distance array (which contains arbitrary distance values
  4300. * from each node to each node in the system), and should also prefer nodes
  4301. * with no CPUs, since presumably they'll have very little allocation pressure
  4302. * on them otherwise.
  4303. * It returns -1 if no node is found.
  4304. */
  4305. static int find_next_best_node(int node, nodemask_t *used_node_mask)
  4306. {
  4307. int n, val;
  4308. int min_val = INT_MAX;
  4309. int best_node = NUMA_NO_NODE;
  4310. const struct cpumask *tmp = cpumask_of_node(0);
  4311. /* Use the local node if we haven't already */
  4312. if (!node_isset(node, *used_node_mask)) {
  4313. node_set(node, *used_node_mask);
  4314. return node;
  4315. }
  4316. for_each_node_state(n, N_MEMORY) {
  4317. /* Don't want a node to appear more than once */
  4318. if (node_isset(n, *used_node_mask))
  4319. continue;
  4320. /* Use the distance array to find the distance */
  4321. val = node_distance(node, n);
  4322. /* Penalize nodes under us ("prefer the next node") */
  4323. val += (n < node);
  4324. /* Give preference to headless and unused nodes */
  4325. tmp = cpumask_of_node(n);
  4326. if (!cpumask_empty(tmp))
  4327. val += PENALTY_FOR_NODE_WITH_CPUS;
  4328. /* Slight preference for less loaded node */
  4329. val *= (MAX_NODE_LOAD*MAX_NUMNODES);
  4330. val += node_load[n];
  4331. if (val < min_val) {
  4332. min_val = val;
  4333. best_node = n;
  4334. }
  4335. }
  4336. if (best_node >= 0)
  4337. node_set(best_node, *used_node_mask);
  4338. return best_node;
  4339. }
  4340. /*
  4341. * Build zonelists ordered by node and zones within node.
  4342. * This results in maximum locality--normal zone overflows into local
  4343. * DMA zone, if any--but risks exhausting DMA zone.
  4344. */
  4345. static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
  4346. unsigned nr_nodes)
  4347. {
  4348. struct zoneref *zonerefs;
  4349. int i;
  4350. zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
  4351. for (i = 0; i < nr_nodes; i++) {
  4352. int nr_zones;
  4353. pg_data_t *node = NODE_DATA(node_order[i]);
  4354. nr_zones = build_zonerefs_node(node, zonerefs);
  4355. zonerefs += nr_zones;
  4356. }
  4357. zonerefs->zone = NULL;
  4358. zonerefs->zone_idx = 0;
  4359. }
  4360. /*
  4361. * Build gfp_thisnode zonelists
  4362. */
  4363. static void build_thisnode_zonelists(pg_data_t *pgdat)
  4364. {
  4365. struct zoneref *zonerefs;
  4366. int nr_zones;
  4367. zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
  4368. nr_zones = build_zonerefs_node(pgdat, zonerefs);
  4369. zonerefs += nr_zones;
  4370. zonerefs->zone = NULL;
  4371. zonerefs->zone_idx = 0;
  4372. }
  4373. /*
4374. * Build the zonelists for this node, ordered by node distance: the
4375. * local node's zones come first, followed by the zones of increasingly
4376. * remote nodes as selected by find_next_best_node(). (The old
4377. * zone-ordered mode has been removed; only node ordering remains.)
  4378. */
  4379. static void build_zonelists(pg_data_t *pgdat)
  4380. {
  4381. static int node_order[MAX_NUMNODES];
  4382. int node, load, nr_nodes = 0;
  4383. nodemask_t used_mask;
  4384. int local_node, prev_node;
  4385. /* NUMA-aware ordering of nodes */
  4386. local_node = pgdat->node_id;
  4387. load = nr_online_nodes;
  4388. prev_node = local_node;
  4389. nodes_clear(used_mask);
  4390. memset(node_order, 0, sizeof(node_order));
  4391. while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
  4392. /*
4393. * We don't want to pressure a particular node, so
4394. * add a penalty to the first node in the same
4395. * distance group to make the ordering round-robin.
  4396. */
  4397. if (node_distance(local_node, node) !=
  4398. node_distance(local_node, prev_node))
  4399. node_load[node] = load;
  4400. node_order[nr_nodes++] = node;
  4401. prev_node = node;
  4402. load--;
  4403. }
  4404. build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
  4405. build_thisnode_zonelists(pgdat);
  4406. }
  4407. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  4408. /*
  4409. * Return node id of node used for "local" allocations.
  4410. * I.e., first node id of first zone in arg node's generic zonelist.
  4411. * Used for initializing percpu 'numa_mem', which is used primarily
  4412. * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
  4413. */
  4414. int local_memory_node(int node)
  4415. {
  4416. struct zoneref *z;
  4417. z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
  4418. gfp_zone(GFP_KERNEL),
  4419. NULL);
  4420. return z->zone->node;
  4421. }
  4422. #endif
  4423. static void setup_min_unmapped_ratio(void);
  4424. static void setup_min_slab_ratio(void);
  4425. #else /* CONFIG_NUMA */
  4426. static void build_zonelists(pg_data_t *pgdat)
  4427. {
  4428. int node, local_node;
  4429. struct zoneref *zonerefs;
  4430. int nr_zones;
  4431. local_node = pgdat->node_id;
  4432. zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
  4433. nr_zones = build_zonerefs_node(pgdat, zonerefs);
  4434. zonerefs += nr_zones;
  4435. /*
  4436. * Now we build the zonelist so that it contains the zones
  4437. * of all the other nodes.
  4438. * We don't want to pressure a particular node, so when
  4439. * building the zones for node N, we make sure that the
  4440. * zones coming right after the local ones are those from
  4441. * node N+1 (modulo N)
  4442. */
  4443. for (node = local_node + 1; node < MAX_NUMNODES; node++) {
  4444. if (!node_online(node))
  4445. continue;
  4446. nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
  4447. zonerefs += nr_zones;
  4448. }
  4449. for (node = 0; node < local_node; node++) {
  4450. if (!node_online(node))
  4451. continue;
  4452. nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
  4453. zonerefs += nr_zones;
  4454. }
  4455. zonerefs->zone = NULL;
  4456. zonerefs->zone_idx = 0;
  4457. }
  4458. #endif /* CONFIG_NUMA */
  4459. /*
  4460. * Boot pageset table. One per cpu which is going to be used for all
  4461. * zones and all nodes. The parameters will be set in such a way
  4462. * that an item put on a list will immediately be handed over to
  4463. * the buddy list. This is safe since pageset manipulation is done
  4464. * with interrupts disabled.
  4465. *
  4466. * The boot_pagesets must be kept even after bootup is complete for
  4467. * unused processors and/or zones. They do play a role for bootstrapping
  4468. * hotplugged processors.
  4469. *
  4470. * zoneinfo_show() and maybe other functions do
  4471. * not check if the processor is online before following the pageset pointer.
  4472. * Other parts of the kernel may not check if the zone is available.
  4473. */
  4474. static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
  4475. static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
  4476. static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);
  4477. static void __build_all_zonelists(void *data)
  4478. {
  4479. int nid;
  4480. int __maybe_unused cpu;
  4481. pg_data_t *self = data;
  4482. static DEFINE_SPINLOCK(lock);
  4483. spin_lock(&lock);
  4484. #ifdef CONFIG_NUMA
  4485. memset(node_load, 0, sizeof(node_load));
  4486. #endif
  4487. /*
  4488. * This node is hotadded and no memory is yet present. So just
  4489. * building zonelists is fine - no need to touch other nodes.
  4490. */
  4491. if (self && !node_online(self->node_id)) {
  4492. build_zonelists(self);
  4493. } else {
  4494. for_each_online_node(nid) {
  4495. pg_data_t *pgdat = NODE_DATA(nid);
  4496. build_zonelists(pgdat);
  4497. }
  4498. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  4499. /*
  4500. * We now know the "local memory node" for each node--
  4501. * i.e., the node of the first zone in the generic zonelist.
  4502. * Set up numa_mem percpu variable for on-line cpus. During
  4503. * boot, only the boot cpu should be on-line; we'll init the
  4504. * secondary cpus' numa_mem as they come on-line. During
  4505. * node/memory hotplug, we'll fixup all on-line cpus.
  4506. */
  4507. for_each_online_cpu(cpu)
  4508. set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
  4509. #endif
  4510. }
  4511. spin_unlock(&lock);
  4512. }
  4513. static noinline void __init
  4514. build_all_zonelists_init(void)
  4515. {
  4516. int cpu;
  4517. __build_all_zonelists(NULL);
  4518. /*
  4519. * Initialize the boot_pagesets that are going to be used
  4520. * for bootstrapping processors. The real pagesets for
  4521. * each zone will be allocated later when the per cpu
  4522. * allocator is available.
  4523. *
  4524. * boot_pagesets are used also for bootstrapping offline
  4525. * cpus if the system is already booted because the pagesets
  4526. * are needed to initialize allocators on a specific cpu too.
  4527. * F.e. the percpu allocator needs the page allocator which
  4528. * needs the percpu allocator in order to allocate its pagesets
  4529. * (a chicken-egg dilemma).
  4530. */
  4531. for_each_possible_cpu(cpu)
  4532. setup_pageset(&per_cpu(boot_pageset, cpu), 0);
  4533. mminit_verify_zonelist();
  4534. cpuset_init_current_mems_allowed();
  4535. }
  4536. /*
4537. * Rebuild the zonelists; at boot (system_state == SYSTEM_BOOTING) the __init helper below is used instead.
  4538. *
  4539. * __ref due to call of __init annotated helper build_all_zonelists_init
  4540. * [protected by SYSTEM_BOOTING].
  4541. */
  4542. void __ref build_all_zonelists(pg_data_t *pgdat)
  4543. {
  4544. if (system_state == SYSTEM_BOOTING) {
  4545. build_all_zonelists_init();
  4546. } else {
  4547. __build_all_zonelists(pgdat);
  4548. /* cpuset refresh routine should be here */
  4549. }
  4550. vm_total_pages = nr_free_pagecache_pages();
  4551. /*
  4552. * Disable grouping by mobility if the number of pages in the
  4553. * system is too low to allow the mechanism to work. It would be
  4554. * more accurate, but expensive to check per-zone. This check is
  4555. * made on memory-hotadd so a system can start with mobility
  4556. * disabled and enable it later
  4557. */
  4558. if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
  4559. page_group_by_mobility_disabled = 1;
  4560. else
  4561. page_group_by_mobility_disabled = 0;
  4562. pr_info("Built %i zonelists, mobility grouping %s. Total pages: %ld\n",
  4563. nr_online_nodes,
  4564. page_group_by_mobility_disabled ? "off" : "on",
  4565. vm_total_pages);
  4566. #ifdef CONFIG_NUMA
  4567. pr_info("Policy zone: %s\n", zone_names[policy_zone]);
  4568. #endif
  4569. }
  4570. /*
  4571. * Initially all pages are reserved - free ones are freed
  4572. * up by free_all_bootmem() once the early boot process is
  4573. * done. Non-atomic initialization, single-pass.
  4574. */
  4575. void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
  4576. unsigned long start_pfn, enum memmap_context context)
  4577. {
  4578. struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
  4579. unsigned long end_pfn = start_pfn + size;
  4580. pg_data_t *pgdat = NODE_DATA(nid);
  4581. unsigned long pfn;
  4582. unsigned long nr_initialised = 0;
  4583. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4584. struct memblock_region *r = NULL, *tmp;
  4585. #endif
  4586. if (highest_memmap_pfn < end_pfn - 1)
  4587. highest_memmap_pfn = end_pfn - 1;
  4588. /*
  4589. * Honor reservation requested by the driver for this ZONE_DEVICE
  4590. * memory
  4591. */
  4592. if (altmap && start_pfn == altmap->base_pfn)
  4593. start_pfn += altmap->reserve;
  4594. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  4595. /*
  4596. * There can be holes in boot-time mem_map[]s handed to this
  4597. * function. They do not exist on hotplugged memory.
  4598. */
  4599. if (context != MEMMAP_EARLY)
  4600. goto not_early;
  4601. if (!early_pfn_valid(pfn)) {
  4602. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4603. /*
  4604. * Skip to the pfn preceding the next valid one (or
  4605. * end_pfn), such that we hit a valid pfn (or end_pfn)
  4606. * on our next iteration of the loop.
  4607. */
  4608. pfn = memblock_next_valid_pfn(pfn, end_pfn) - 1;
  4609. #endif
  4610. continue;
  4611. }
  4612. if (!early_pfn_in_nid(pfn, nid))
  4613. continue;
  4614. if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
  4615. break;
  4616. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4617. /*
4618. * Check the memblock attribute set by firmware, which can affect
4619. * the kernel memory layout. If zone == ZONE_MOVABLE but the memory
4620. * is mirrored, it is an overlapped memmap init; skip it.
  4621. */
  4622. if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
  4623. if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
  4624. for_each_memblock(memory, tmp)
  4625. if (pfn < memblock_region_memory_end_pfn(tmp))
  4626. break;
  4627. r = tmp;
  4628. }
  4629. if (pfn >= memblock_region_memory_base_pfn(r) &&
  4630. memblock_is_mirror(r)) {
  4631. /* already initialized as NORMAL */
  4632. pfn = memblock_region_memory_end_pfn(r);
  4633. continue;
  4634. }
  4635. }
  4636. #endif
  4637. not_early:
  4638. /*
  4639. * Mark the block movable so that blocks are reserved for
  4640. * movable at startup. This will force kernel allocations
  4641. * to reserve their blocks rather than leaking throughout
  4642. * the address space during boot when many long-lived
  4643. * kernel allocations are made.
  4644. *
4645. * The pageblock bitmap is created for the zone's valid pfn range,
4646. * but the memmap can be created for invalid pages (for alignment);
4647. * check here so that set_pageblock_migratetype() is not called for
4648. * a pfn outside the zone.
  4649. */
  4650. if (!(pfn & (pageblock_nr_pages - 1))) {
  4651. struct page *page = pfn_to_page(pfn);
  4652. __init_single_page(page, pfn, zone, nid);
  4653. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  4654. cond_resched();
  4655. } else {
  4656. __init_single_pfn(pfn, zone, nid);
  4657. }
  4658. }
  4659. }
  4660. static void __meminit zone_init_free_lists(struct zone *zone)
  4661. {
  4662. unsigned int order, t;
  4663. for_each_migratetype_order(order, t) {
  4664. INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
  4665. zone->free_area[order].nr_free = 0;
  4666. }
  4667. }
  4668. #ifndef __HAVE_ARCH_MEMMAP_INIT
  4669. #define memmap_init(size, nid, zone, start_pfn) \
  4670. memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
  4671. #endif
  4672. static int zone_batchsize(struct zone *zone)
  4673. {
  4674. #ifdef CONFIG_MMU
  4675. int batch;
  4676. /*
  4677. * The per-cpu-pages pools are set to around 1000th of the
  4678. * size of the zone. But no more than 1/2 of a meg.
  4679. *
  4680. * OK, so we don't know how big the cache is. So guess.
  4681. */
  4682. batch = zone->managed_pages / 1024;
  4683. if (batch * PAGE_SIZE > 512 * 1024)
  4684. batch = (512 * 1024) / PAGE_SIZE;
  4685. batch /= 4; /* We effectively *= 4 below */
  4686. if (batch < 1)
  4687. batch = 1;
  4688. /*
  4689. * Clamp the batch to a 2^n - 1 value. Having a power
  4690. * of 2 value was found to be more likely to have
  4691. * suboptimal cache aliasing properties in some cases.
  4692. *
  4693. * For example if 2 tasks are alternately allocating
  4694. * batches of pages, one task can end up with a lot
  4695. * of pages of one half of the possible page colors
  4696. * and the other with pages of the other colors.
  4697. */
  4698. batch = rounddown_pow_of_two(batch + batch/2) - 1;
  4699. return batch;
  4700. #else
  4701. /* The deferral and batching of frees should be suppressed under NOMMU
  4702. * conditions.
  4703. *
  4704. * The problem is that NOMMU needs to be able to allocate large chunks
  4705. * of contiguous memory as there's no hardware page translation to
  4706. * assemble apparent contiguous memory from discontiguous pages.
  4707. *
  4708. * Queueing large contiguous runs of pages for batching, however,
  4709. * causes the pages to actually be freed in smaller chunks. As there
  4710. * can be a significant delay between the individual batches being
  4711. * recycled, this leads to the once large chunks of space being
  4712. * fragmented and becoming unavailable for high-order allocations.
  4713. */
  4714. return 0;
  4715. #endif
  4716. }
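/*
 * Worked example for zone_batchsize() (editorial sketch, assuming
 * 4KiB pages): a 1GiB zone has managed_pages = 262144, so batch
 * starts at 256, is capped at 128 by the 512KiB limit, becomes 32
 * after the division by 4 and is finally rounded to 31 (2^n - 1).
 * pageset_set_batch() below then sets pcp->high to 6 * 31 = 186.
 */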
  4717. /*
  4718. * pcp->high and pcp->batch values are related and dependent on one another:
4719. * ->batch must never be higher than ->high.
  4720. * The following function updates them in a safe manner without read side
  4721. * locking.
  4722. *
  4723. * Any new users of pcp->batch and pcp->high should ensure they can cope with
4724. * those fields changing asynchronously (according to the above rule).
  4725. *
  4726. * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
  4727. * outside of boot time (or some other assurance that no concurrent updaters
  4728. * exist).
  4729. */
  4730. static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
  4731. unsigned long batch)
  4732. {
  4733. /* start with a fail safe value for batch */
  4734. pcp->batch = 1;
  4735. smp_wmb();
  4736. /* Update high, then batch, in order */
  4737. pcp->high = high;
  4738. smp_wmb();
  4739. pcp->batch = batch;
  4740. }
  4741. /* a companion to pageset_set_high() */
  4742. static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
  4743. {
  4744. pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
  4745. }
  4746. static void pageset_init(struct per_cpu_pageset *p)
  4747. {
  4748. struct per_cpu_pages *pcp;
  4749. int migratetype;
  4750. memset(p, 0, sizeof(*p));
  4751. pcp = &p->pcp;
  4752. pcp->count = 0;
  4753. for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
  4754. INIT_LIST_HEAD(&pcp->lists[migratetype]);
  4755. }
  4756. static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
  4757. {
  4758. pageset_init(p);
  4759. pageset_set_batch(p, batch);
  4760. }
  4761. /*
  4762. * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
  4763. * to the value high for the pageset p.
  4764. */
  4765. static void pageset_set_high(struct per_cpu_pageset *p,
  4766. unsigned long high)
  4767. {
  4768. unsigned long batch = max(1UL, high / 4);
  4769. if ((high / 4) > (PAGE_SHIFT * 8))
  4770. batch = PAGE_SHIFT * 8;
  4771. pageset_update(&p->pcp, high, batch);
  4772. }
  4773. static void pageset_set_high_and_batch(struct zone *zone,
  4774. struct per_cpu_pageset *pcp)
  4775. {
  4776. if (percpu_pagelist_fraction)
  4777. pageset_set_high(pcp,
  4778. (zone->managed_pages /
  4779. percpu_pagelist_fraction));
  4780. else
  4781. pageset_set_batch(pcp, zone_batchsize(zone));
  4782. }
  4783. static void __meminit zone_pageset_init(struct zone *zone, int cpu)
  4784. {
  4785. struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
  4786. pageset_init(pcp);
  4787. pageset_set_high_and_batch(zone, pcp);
  4788. }
  4789. void __meminit setup_zone_pageset(struct zone *zone)
  4790. {
  4791. int cpu;
  4792. zone->pageset = alloc_percpu(struct per_cpu_pageset);
  4793. for_each_possible_cpu(cpu)
  4794. zone_pageset_init(zone, cpu);
  4795. }
  4796. /*
  4797. * Allocate per cpu pagesets and initialize them.
  4798. * Before this call only boot pagesets were available.
  4799. */
  4800. void __init setup_per_cpu_pageset(void)
  4801. {
  4802. struct pglist_data *pgdat;
  4803. struct zone *zone;
  4804. for_each_populated_zone(zone)
  4805. setup_zone_pageset(zone);
  4806. for_each_online_pgdat(pgdat)
  4807. pgdat->per_cpu_nodestats =
  4808. alloc_percpu(struct per_cpu_nodestat);
  4809. }
  4810. static __meminit void zone_pcp_init(struct zone *zone)
  4811. {
  4812. /*
  4813. * per cpu subsystem is not up at this point. The following code
  4814. * relies on the ability of the linker to provide the
  4815. * offset of a (static) per cpu variable into the per cpu area.
  4816. */
  4817. zone->pageset = &boot_pageset;
  4818. if (populated_zone(zone))
  4819. printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
  4820. zone->name, zone->present_pages,
  4821. zone_batchsize(zone));
  4822. }
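/*
 * Set up a zone that does not yet contain any pages: record its start
 * pfn, extend pgdat->nr_zones to cover it and initialise the per-order
 * free lists.
 */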
  4823. void __meminit init_currently_empty_zone(struct zone *zone,
  4824. unsigned long zone_start_pfn,
  4825. unsigned long size)
  4826. {
  4827. struct pglist_data *pgdat = zone->zone_pgdat;
  4828. pgdat->nr_zones = zone_idx(zone) + 1;
  4829. zone->zone_start_pfn = zone_start_pfn;
  4830. mminit_dprintk(MMINIT_TRACE, "memmap_init",
  4831. "Initialising map node %d zone %lu pfns %lu -> %lu\n",
  4832. pgdat->node_id,
  4833. (unsigned long)zone_idx(zone),
  4834. zone_start_pfn, (zone_start_pfn + size));
  4835. zone_init_free_lists(zone);
  4836. zone->initialized = 1;
  4837. }
  4838. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4839. #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
  4840. /*
  4841. * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
  4842. */
  4843. int __meminit __early_pfn_to_nid(unsigned long pfn,
  4844. struct mminit_pfnnid_cache *state)
  4845. {
  4846. unsigned long start_pfn, end_pfn;
  4847. int nid;
  4848. if (state->last_start <= pfn && pfn < state->last_end)
  4849. return state->last_nid;
  4850. nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
  4851. if (nid != -1) {
  4852. state->last_start = start_pfn;
  4853. state->last_end = end_pfn;
  4854. state->last_nid = nid;
  4855. }
  4856. return nid;
  4857. }
  4858. #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
  4859. /**
  4860. * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
  4861. * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
  4862. * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
  4863. *
  4864. * If an architecture guarantees that all ranges registered contain no holes
4865. * and may be freed, this function may be used instead of calling
  4866. * memblock_free_early_nid() manually.
  4867. */
  4868. void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
  4869. {
  4870. unsigned long start_pfn, end_pfn;
  4871. int i, this_nid;
  4872. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
  4873. start_pfn = min(start_pfn, max_low_pfn);
  4874. end_pfn = min(end_pfn, max_low_pfn);
  4875. if (start_pfn < end_pfn)
  4876. memblock_free_early_nid(PFN_PHYS(start_pfn),
  4877. (end_pfn - start_pfn) << PAGE_SHIFT,
  4878. this_nid);
  4879. }
  4880. }
  4881. /**
  4882. * sparse_memory_present_with_active_regions - Call memory_present for each active range
  4883. * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
  4884. *
  4885. * If an architecture guarantees that all ranges registered contain no holes and may
  4886. * be freed, this function may be used instead of calling memory_present() manually.
  4887. */
  4888. void __init sparse_memory_present_with_active_regions(int nid)
  4889. {
  4890. unsigned long start_pfn, end_pfn;
  4891. int i, this_nid;
  4892. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
  4893. memory_present(this_nid, start_pfn, end_pfn);
  4894. }
  4895. /**
  4896. * get_pfn_range_for_nid - Return the start and end page frames for a node
  4897. * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
  4898. * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
  4899. * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
  4900. *
  4901. * It returns the start and end page frame of a node based on information
  4902. * provided by memblock_set_node(). If called for a node
  4903. * with no available memory, a warning is printed and the start and end
  4904. * PFNs will be 0.
  4905. */
  4906. void __meminit get_pfn_range_for_nid(unsigned int nid,
  4907. unsigned long *start_pfn, unsigned long *end_pfn)
  4908. {
  4909. unsigned long this_start_pfn, this_end_pfn;
  4910. int i;
  4911. *start_pfn = -1UL;
  4912. *end_pfn = 0;
  4913. for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
  4914. *start_pfn = min(*start_pfn, this_start_pfn);
  4915. *end_pfn = max(*end_pfn, this_end_pfn);
  4916. }
  4917. if (*start_pfn == -1UL)
  4918. *start_pfn = 0;
  4919. }
  4920. /*
  4921. * This finds a zone that can be used for ZONE_MOVABLE pages. The
4922. * assumption is made that zones within a node are ordered by monotonically
4923. * increasing memory addresses, so that the "highest" populated zone is used.
  4924. */
  4925. static void __init find_usable_zone_for_movable(void)
  4926. {
  4927. int zone_index;
  4928. for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
  4929. if (zone_index == ZONE_MOVABLE)
  4930. continue;
  4931. if (arch_zone_highest_possible_pfn[zone_index] >
  4932. arch_zone_lowest_possible_pfn[zone_index])
  4933. break;
  4934. }
  4935. VM_BUG_ON(zone_index == -1);
  4936. movable_zone = zone_index;
  4937. }
  4938. /*
  4939. * The zone ranges provided by the architecture do not include ZONE_MOVABLE
  4940. * because it is sized independent of architecture. Unlike the other zones,
  4941. * the starting point for ZONE_MOVABLE is not fixed. It may be different
  4942. * in each node depending on the size of each node and how evenly kernelcore
  4943. * is distributed. This helper function adjusts the zone ranges
  4944. * provided by the architecture for a given node by using the end of the
  4945. * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
4946. * zones within a node are in order of monotonically increasing memory addresses.
  4947. */
  4948. static void __meminit adjust_zone_range_for_zone_movable(int nid,
  4949. unsigned long zone_type,
  4950. unsigned long node_start_pfn,
  4951. unsigned long node_end_pfn,
  4952. unsigned long *zone_start_pfn,
  4953. unsigned long *zone_end_pfn)
  4954. {
  4955. /* Only adjust if ZONE_MOVABLE is on this node */
  4956. if (zone_movable_pfn[nid]) {
  4957. /* Size ZONE_MOVABLE */
  4958. if (zone_type == ZONE_MOVABLE) {
  4959. *zone_start_pfn = zone_movable_pfn[nid];
  4960. *zone_end_pfn = min(node_end_pfn,
  4961. arch_zone_highest_possible_pfn[movable_zone]);
  4962. /* Adjust for ZONE_MOVABLE starting within this range */
  4963. } else if (!mirrored_kernelcore &&
  4964. *zone_start_pfn < zone_movable_pfn[nid] &&
  4965. *zone_end_pfn > zone_movable_pfn[nid]) {
  4966. *zone_end_pfn = zone_movable_pfn[nid];
  4967. /* Check if this whole range is within ZONE_MOVABLE */
  4968. } else if (*zone_start_pfn >= zone_movable_pfn[nid])
  4969. *zone_start_pfn = *zone_end_pfn;
  4970. }
  4971. }
  4972. /*
  4973. * Return the number of pages a zone spans in a node, including holes
  4974. * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  4975. */
  4976. static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  4977. unsigned long zone_type,
  4978. unsigned long node_start_pfn,
  4979. unsigned long node_end_pfn,
  4980. unsigned long *zone_start_pfn,
  4981. unsigned long *zone_end_pfn,
  4982. unsigned long *ignored)
  4983. {
4984. /* When hot-adding a new node from cpu_up(), the node should be empty */
  4985. if (!node_start_pfn && !node_end_pfn)
  4986. return 0;
  4987. /* Get the start and end of the zone */
  4988. *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
  4989. *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
  4990. adjust_zone_range_for_zone_movable(nid, zone_type,
  4991. node_start_pfn, node_end_pfn,
  4992. zone_start_pfn, zone_end_pfn);
  4993. /* Check that this node has pages within the zone's required range */
  4994. if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
  4995. return 0;
  4996. /* Move the zone boundaries inside the node if necessary */
  4997. *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
  4998. *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
  4999. /* Return the spanned pages */
  5000. return *zone_end_pfn - *zone_start_pfn;
  5001. }
  5002. /*
  5003. * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  5004. * then all holes in the requested range will be accounted for.
  5005. */
  5006. unsigned long __meminit __absent_pages_in_range(int nid,
  5007. unsigned long range_start_pfn,
  5008. unsigned long range_end_pfn)
  5009. {
  5010. unsigned long nr_absent = range_end_pfn - range_start_pfn;
  5011. unsigned long start_pfn, end_pfn;
  5012. int i;
  5013. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
  5014. start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
  5015. end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
  5016. nr_absent -= end_pfn - start_pfn;
  5017. }
  5018. return nr_absent;
  5019. }
  5020. /**
  5021. * absent_pages_in_range - Return number of page frames in holes within a range
  5022. * @start_pfn: The start PFN to start searching for holes
  5023. * @end_pfn: The end PFN to stop searching for holes
  5024. *
5025. * It returns the number of page frames in memory holes within a range.
  5026. */
  5027. unsigned long __init absent_pages_in_range(unsigned long start_pfn,
  5028. unsigned long end_pfn)
  5029. {
  5030. return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
  5031. }
  5032. /* Return the number of page frames in holes in a zone on a node */
  5033. static unsigned long __meminit zone_absent_pages_in_node(int nid,
  5034. unsigned long zone_type,
  5035. unsigned long node_start_pfn,
  5036. unsigned long node_end_pfn,
  5037. unsigned long *ignored)
  5038. {
  5039. unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
  5040. unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
  5041. unsigned long zone_start_pfn, zone_end_pfn;
  5042. unsigned long nr_absent;
5043. /* When hot-adding a new node from cpu_up(), the node should be empty */
  5044. if (!node_start_pfn && !node_end_pfn)
  5045. return 0;
  5046. zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
  5047. zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
  5048. adjust_zone_range_for_zone_movable(nid, zone_type,
  5049. node_start_pfn, node_end_pfn,
  5050. &zone_start_pfn, &zone_end_pfn);
  5051. nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
  5052. /*
  5053. * ZONE_MOVABLE handling.
  5054. * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
  5055. * and vice versa.
  5056. */
  5057. if (mirrored_kernelcore && zone_movable_pfn[nid]) {
  5058. unsigned long start_pfn, end_pfn;
  5059. struct memblock_region *r;
  5060. for_each_memblock(memory, r) {
  5061. start_pfn = clamp(memblock_region_memory_base_pfn(r),
  5062. zone_start_pfn, zone_end_pfn);
  5063. end_pfn = clamp(memblock_region_memory_end_pfn(r),
  5064. zone_start_pfn, zone_end_pfn);
  5065. if (zone_type == ZONE_MOVABLE &&
  5066. memblock_is_mirror(r))
  5067. nr_absent += end_pfn - start_pfn;
  5068. if (zone_type == ZONE_NORMAL &&
  5069. !memblock_is_mirror(r))
  5070. nr_absent += end_pfn - start_pfn;
  5071. }
  5072. }
  5073. return nr_absent;
  5074. }
  5075. #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  5076. static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
  5077. unsigned long zone_type,
  5078. unsigned long node_start_pfn,
  5079. unsigned long node_end_pfn,
  5080. unsigned long *zone_start_pfn,
  5081. unsigned long *zone_end_pfn,
  5082. unsigned long *zones_size)
  5083. {
  5084. unsigned int zone;
  5085. *zone_start_pfn = node_start_pfn;
  5086. for (zone = 0; zone < zone_type; zone++)
  5087. *zone_start_pfn += zones_size[zone];
  5088. *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
  5089. return zones_size[zone_type];
  5090. }
  5091. static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
  5092. unsigned long zone_type,
  5093. unsigned long node_start_pfn,
  5094. unsigned long node_end_pfn,
  5095. unsigned long *zholes_size)
  5096. {
  5097. if (!zholes_size)
  5098. return 0;
  5099. return zholes_size[zone_type];
  5100. }
  5101. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  5102. static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
  5103. unsigned long node_start_pfn,
  5104. unsigned long node_end_pfn,
  5105. unsigned long *zones_size,
  5106. unsigned long *zholes_size)
  5107. {
  5108. unsigned long realtotalpages = 0, totalpages = 0;
  5109. enum zone_type i;
  5110. for (i = 0; i < MAX_NR_ZONES; i++) {
  5111. struct zone *zone = pgdat->node_zones + i;
  5112. unsigned long zone_start_pfn, zone_end_pfn;
  5113. unsigned long size, real_size;
  5114. size = zone_spanned_pages_in_node(pgdat->node_id, i,
  5115. node_start_pfn,
  5116. node_end_pfn,
  5117. &zone_start_pfn,
  5118. &zone_end_pfn,
  5119. zones_size);
  5120. real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
  5121. node_start_pfn, node_end_pfn,
  5122. zholes_size);
  5123. if (size)
  5124. zone->zone_start_pfn = zone_start_pfn;
  5125. else
  5126. zone->zone_start_pfn = 0;
  5127. zone->spanned_pages = size;
  5128. zone->present_pages = real_size;
  5129. totalpages += size;
  5130. realtotalpages += real_size;
  5131. }
  5132. pgdat->node_spanned_pages = totalpages;
  5133. pgdat->node_present_pages = realtotalpages;
  5134. printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
  5135. realtotalpages);
  5136. }
  5137. #ifndef CONFIG_SPARSEMEM
  5138. /*
  5139. * Calculate the size of the zone->blockflags rounded to an unsigned long
  5140. * Start by making sure zonesize is a multiple of pageblock_order by rounding
  5141. * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
  5142. * round what is now in bits to nearest long in bits, then return it in
  5143. * bytes.
  5144. */
  5145. static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
  5146. {
  5147. unsigned long usemapsize;
  5148. zonesize += zone_start_pfn & (pageblock_nr_pages-1);
  5149. usemapsize = roundup(zonesize, pageblock_nr_pages);
  5150. usemapsize = usemapsize >> pageblock_order;
  5151. usemapsize *= NR_PAGEBLOCK_BITS;
  5152. usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
  5153. return usemapsize / 8;
  5154. }
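/*
 * Worked example (illustrative values only, assuming 4KiB pages,
 * pageblock_order == 9 and NR_PAGEBLOCK_BITS == 4, the usual x86_64
 * defaults): a 1GiB zone spans 262144 pages, i.e. 512 pageblocks.
 * 512 * 4 = 2048 bits, already a multiple of BITS_PER_LONG, so
 * usemap_size() returns 2048 / 8 = 256 bytes.
 */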
  5155. static void __init setup_usemap(struct pglist_data *pgdat,
  5156. struct zone *zone,
  5157. unsigned long zone_start_pfn,
  5158. unsigned long zonesize)
  5159. {
  5160. unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
  5161. zone->pageblock_flags = NULL;
  5162. if (usemapsize)
  5163. zone->pageblock_flags =
  5164. memblock_virt_alloc_node_nopanic(usemapsize,
  5165. pgdat->node_id);
  5166. }
  5167. #else
  5168. static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
  5169. unsigned long zone_start_pfn, unsigned long zonesize) {}
  5170. #endif /* CONFIG_SPARSEMEM */
  5171. #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
  5172. /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
  5173. void __paginginit set_pageblock_order(void)
  5174. {
  5175. unsigned int order;
  5176. /* Check that pageblock_nr_pages has not already been setup */
  5177. if (pageblock_order)
  5178. return;
  5179. if (HPAGE_SHIFT > PAGE_SHIFT)
  5180. order = HUGETLB_PAGE_ORDER;
  5181. else
  5182. order = MAX_ORDER - 1;
  5183. /*
  5184. * Assume the largest contiguous order of interest is a huge page.
  5185. * This value may be variable depending on boot parameters on IA64 and
  5186. * powerpc.
  5187. */
  5188. pageblock_order = order;
  5189. }
  5190. #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
  5191. /*
  5192. * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
  5193. * is unused as pageblock_order is set at compile-time. See
  5194. * include/linux/pageblock-flags.h for the values of pageblock_order based on
  5195. * the kernel config
  5196. */
  5197. void __paginginit set_pageblock_order(void)
  5198. {
  5199. }
  5200. #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
  5201. static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
  5202. unsigned long present_pages)
  5203. {
  5204. unsigned long pages = spanned_pages;
  5205. /*
  5206. * Provide a more accurate estimation if there are holes within
  5207. * the zone and SPARSEMEM is in use. If there are holes within the
  5208. * zone, each populated memory region may cost us one or two extra
  5209. * memmap pages due to alignment because memmap pages for each
  5210. * populated regions may not be naturally aligned on page boundary.
  5211. * So the (present_pages >> 4) heuristic is a tradeoff for that.
  5212. */
  5213. if (spanned_pages > present_pages + (present_pages >> 4) &&
  5214. IS_ENABLED(CONFIG_SPARSEMEM))
  5215. pages = present_pages;
  5216. return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
  5217. }
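/*
 * Rough illustration (assuming 4KiB pages and a 64-byte struct page, both
 * common but configuration dependent): a zone spanning 4GiB has 1048576
 * struct pages, i.e. 64MiB of memmap, so calc_memmap_size() returns 16384
 * pages.  With SPARSEMEM and a zone whose spanned_pages greatly exceed its
 * present_pages, the estimate is based on present_pages instead.
 */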
  5218. /*
  5219. * Set up the zone data structures:
  5220. * - mark all pages reserved
  5221. * - mark all memory queues empty
  5222. * - clear the memory bitmaps
  5223. *
  5224. * NOTE: pgdat should get zeroed by caller.
  5225. */
  5226. static void __paginginit free_area_init_core(struct pglist_data *pgdat)
  5227. {
  5228. enum zone_type j;
  5229. int nid = pgdat->node_id;
  5230. pgdat_resize_init(pgdat);
  5231. #ifdef CONFIG_NUMA_BALANCING
  5232. spin_lock_init(&pgdat->numabalancing_migrate_lock);
  5233. pgdat->numabalancing_migrate_nr_pages = 0;
  5234. pgdat->numabalancing_migrate_next_window = jiffies;
  5235. #endif
  5236. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  5237. spin_lock_init(&pgdat->split_queue_lock);
  5238. INIT_LIST_HEAD(&pgdat->split_queue);
  5239. pgdat->split_queue_len = 0;
  5240. #endif
  5241. init_waitqueue_head(&pgdat->kswapd_wait);
  5242. init_waitqueue_head(&pgdat->pfmemalloc_wait);
  5243. #ifdef CONFIG_COMPACTION
  5244. init_waitqueue_head(&pgdat->kcompactd_wait);
  5245. #endif
  5246. pgdat_page_ext_init(pgdat);
  5247. spin_lock_init(&pgdat->lru_lock);
  5248. lruvec_init(node_lruvec(pgdat));
  5249. pgdat->per_cpu_nodestats = &boot_nodestats;
  5250. for (j = 0; j < MAX_NR_ZONES; j++) {
  5251. struct zone *zone = pgdat->node_zones + j;
  5252. unsigned long size, realsize, freesize, memmap_pages;
  5253. unsigned long zone_start_pfn = zone->zone_start_pfn;
  5254. size = zone->spanned_pages;
  5255. realsize = freesize = zone->present_pages;
  5256. /*
  5257. * Adjust freesize so that it accounts for how much memory
  5258. * is used by this zone for memmap. This affects the watermark
  5259. * and per-cpu initialisations
  5260. */
  5261. memmap_pages = calc_memmap_size(size, realsize);
  5262. if (!is_highmem_idx(j)) {
  5263. if (freesize >= memmap_pages) {
  5264. freesize -= memmap_pages;
  5265. if (memmap_pages)
  5266. printk(KERN_DEBUG
  5267. " %s zone: %lu pages used for memmap\n",
  5268. zone_names[j], memmap_pages);
  5269. } else
  5270. pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
  5271. zone_names[j], memmap_pages, freesize);
  5272. }
  5273. /* Account for reserved pages */
  5274. if (j == 0 && freesize > dma_reserve) {
  5275. freesize -= dma_reserve;
  5276. printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
  5277. zone_names[0], dma_reserve);
  5278. }
  5279. if (!is_highmem_idx(j))
  5280. nr_kernel_pages += freesize;
  5281. /* Charge for highmem memmap if there are enough kernel pages */
  5282. else if (nr_kernel_pages > memmap_pages * 2)
  5283. nr_kernel_pages -= memmap_pages;
  5284. nr_all_pages += freesize;
  5285. /*
  5286. * Set an approximate value for lowmem here, it will be adjusted
  5287. * when the bootmem allocator frees pages into the buddy system.
  5288. * And all highmem pages will be managed by the buddy system.
  5289. */
  5290. zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
  5291. #ifdef CONFIG_NUMA
  5292. zone->node = nid;
  5293. #endif
  5294. zone->name = zone_names[j];
  5295. zone->zone_pgdat = pgdat;
  5296. spin_lock_init(&zone->lock);
  5297. zone_seqlock_init(zone);
  5298. zone_pcp_init(zone);
  5299. if (!size)
  5300. continue;
  5301. set_pageblock_order();
  5302. setup_usemap(pgdat, zone, zone_start_pfn, size);
  5303. init_currently_empty_zone(zone, zone_start_pfn, size);
  5304. memmap_init(size, nid, j, zone_start_pfn);
  5305. }
  5306. }
  5307. #ifdef CONFIG_FLAT_NODE_MEM_MAP
  5308. static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
  5309. {
  5310. unsigned long __maybe_unused start = 0;
  5311. unsigned long __maybe_unused offset = 0;
  5312. /* Skip empty nodes */
  5313. if (!pgdat->node_spanned_pages)
  5314. return;
  5315. start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
  5316. offset = pgdat->node_start_pfn - start;
  5317. /* ia64 gets its own node_mem_map, before this, without bootmem */
  5318. if (!pgdat->node_mem_map) {
  5319. unsigned long size, end;
  5320. struct page *map;
  5321. /*
  5322. * The zone's endpoints aren't required to be MAX_ORDER
  5323. * aligned but the node_mem_map endpoints must be in order
  5324. * for the buddy allocator to function correctly.
  5325. */
  5326. end = pgdat_end_pfn(pgdat);
  5327. end = ALIGN(end, MAX_ORDER_NR_PAGES);
  5328. size = (end - start) * sizeof(struct page);
  5329. map = alloc_remap(pgdat->node_id, size);
  5330. if (!map)
  5331. map = memblock_virt_alloc_node_nopanic(size,
  5332. pgdat->node_id);
  5333. pgdat->node_mem_map = map + offset;
  5334. }
  5335. pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
  5336. __func__, pgdat->node_id, (unsigned long)pgdat,
  5337. (unsigned long)pgdat->node_mem_map);
  5338. #ifndef CONFIG_NEED_MULTIPLE_NODES
  5339. /*
  5340. * With no DISCONTIG, the global mem_map is just set as node 0's
  5341. */
  5342. if (pgdat == NODE_DATA(0)) {
  5343. mem_map = NODE_DATA(0)->node_mem_map;
  5344. #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
  5345. if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
  5346. mem_map -= offset;
  5347. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  5348. }
  5349. #endif
  5350. }
  5351. #else
  5352. static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
  5353. #endif /* CONFIG_FLAT_NODE_MEM_MAP */
  5354. void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
  5355. unsigned long node_start_pfn, unsigned long *zholes_size)
  5356. {
  5357. pg_data_t *pgdat = NODE_DATA(nid);
  5358. unsigned long start_pfn = 0;
  5359. unsigned long end_pfn = 0;
  5360. /* pg_data_t should be reset to zero when it's allocated */
  5361. WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);
  5362. pgdat->node_id = nid;
  5363. pgdat->node_start_pfn = node_start_pfn;
  5364. pgdat->per_cpu_nodestats = NULL;
  5365. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  5366. get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
  5367. pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
  5368. (u64)start_pfn << PAGE_SHIFT,
  5369. end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
  5370. #else
  5371. start_pfn = node_start_pfn;
  5372. #endif
  5373. calculate_node_totalpages(pgdat, start_pfn, end_pfn,
  5374. zones_size, zholes_size);
  5375. alloc_node_mem_map(pgdat);
  5376. reset_deferred_meminit(pgdat);
  5377. free_area_init_core(pgdat);
  5378. }
  5379. #ifdef CONFIG_HAVE_MEMBLOCK
  5380. /*
  5381. * Only struct pages that are backed by physical memory are zeroed and
  5382. * initialized by going through __init_single_page(). But, there are some
  5383. * struct pages which are reserved in memblock allocator and their fields
  5384. * may be accessed (for example page_to_pfn() on some configuration accesses
  5385. * flags). We must explicitly zero those struct pages.
  5386. */
  5387. void __paginginit zero_resv_unavail(void)
  5388. {
  5389. phys_addr_t start, end;
  5390. unsigned long pfn;
  5391. u64 i, pgcnt;
  5392. /*
  5393. * Loop through ranges that are reserved, but do not have reported
  5394. * physical memory backing.
  5395. */
  5396. pgcnt = 0;
  5397. for_each_resv_unavail_range(i, &start, &end) {
  5398. for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
  5399. if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
  5400. continue;
  5401. mm_zero_struct_page(pfn_to_page(pfn));
  5402. pgcnt++;
  5403. }
  5404. }
  5405. /*
  5406. * Struct pages that do not have backing memory. This could be because
  5407. * firmware is using some of this memory, or for some other reasons.
* Once memblock is changed so that such behaviour is not allowed, i.e. the
* list of "reserved" memory becomes a subset of the list of "memory", this
* code can be removed.
  5411. */
  5412. if (pgcnt)
pr_info("Reserved but unavailable: %lld pages\n", pgcnt);
  5414. }
  5415. #endif /* CONFIG_HAVE_MEMBLOCK */
  5416. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  5417. #if MAX_NUMNODES > 1
  5418. /*
  5419. * Figure out the number of possible node ids.
  5420. */
  5421. void __init setup_nr_node_ids(void)
  5422. {
  5423. unsigned int highest;
  5424. highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
  5425. nr_node_ids = highest + 1;
  5426. }
  5427. #endif
  5428. /**
  5429. * node_map_pfn_alignment - determine the maximum internode alignment
  5430. *
  5431. * This function should be called after node map is populated and sorted.
  5432. * It calculates the maximum power of two alignment which can distinguish
  5433. * all the nodes.
  5434. *
* For example, if all nodes are 1GiB and aligned to 1GiB, the return value
* indicates 1GiB alignment, i.e. (1 << (30 - PAGE_SHIFT)). If the nodes are
* instead shifted by 256MiB, the reported alignment is 256MiB. Note that if
* only the last node is shifted, 1GiB is still enough and this function
* will indicate so.
  5439. *
  5440. * This is used to test whether pfn -> nid mapping of the chosen memory
  5441. * model has fine enough granularity to avoid incorrect mapping for the
  5442. * populated node map.
  5443. *
  5444. * Returns the determined alignment in pfn's. 0 if there is no alignment
  5445. * requirement (single node).
  5446. */
  5447. unsigned long __init node_map_pfn_alignment(void)
  5448. {
  5449. unsigned long accl_mask = 0, last_end = 0;
  5450. unsigned long start, end, mask;
  5451. int last_nid = -1;
  5452. int i, nid;
  5453. for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
  5454. if (!start || last_nid < 0 || last_nid == nid) {
  5455. last_nid = nid;
  5456. last_end = end;
  5457. continue;
  5458. }
  5459. /*
  5460. * Start with a mask granular enough to pin-point to the
  5461. * start pfn and tick off bits one-by-one until it becomes
  5462. * too coarse to separate the current node from the last.
  5463. */
  5464. mask = ~((1 << __ffs(start)) - 1);
  5465. while (mask && last_end <= (start & (mask << 1)))
  5466. mask <<= 1;
  5467. /* accumulate all internode masks */
  5468. accl_mask |= mask;
  5469. }
  5470. /* convert mask to number of pages */
  5471. return ~accl_mask + 1;
  5472. }
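/*
 * Numeric sketch (hypothetical layout): with 4KiB pages, two 1GiB nodes
 * meeting at pfn 0x40000 give mask = ~((1 << 18) - 1) for that boundary,
 * so the function returns 0x40000 pfns, i.e. 1GiB alignment, matching the
 * example in the comment above.
 */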
  5473. /* Find the lowest pfn for a node */
  5474. static unsigned long __init find_min_pfn_for_node(int nid)
  5475. {
  5476. unsigned long min_pfn = ULONG_MAX;
  5477. unsigned long start_pfn;
  5478. int i;
  5479. for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
  5480. min_pfn = min(min_pfn, start_pfn);
  5481. if (min_pfn == ULONG_MAX) {
  5482. pr_warn("Could not find start_pfn for node %d\n", nid);
  5483. return 0;
  5484. }
  5485. return min_pfn;
  5486. }
  5487. /**
  5488. * find_min_pfn_with_active_regions - Find the minimum PFN registered
  5489. *
  5490. * It returns the minimum PFN based on information provided via
  5491. * memblock_set_node().
  5492. */
  5493. unsigned long __init find_min_pfn_with_active_regions(void)
  5494. {
  5495. return find_min_pfn_for_node(MAX_NUMNODES);
  5496. }
  5497. /*
  5498. * early_calculate_totalpages()
  5499. * Sum pages in active regions for movable zone.
  5500. * Populate N_MEMORY for calculating usable_nodes.
  5501. */
  5502. static unsigned long __init early_calculate_totalpages(void)
  5503. {
  5504. unsigned long totalpages = 0;
  5505. unsigned long start_pfn, end_pfn;
  5506. int i, nid;
  5507. for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
  5508. unsigned long pages = end_pfn - start_pfn;
  5509. totalpages += pages;
  5510. if (pages)
  5511. node_set_state(nid, N_MEMORY);
  5512. }
  5513. return totalpages;
  5514. }
  5515. /*
  5516. * Find the PFN the Movable zone begins in each node. Kernel memory
  5517. * is spread evenly between nodes as long as the nodes have enough
  5518. * memory. When they don't, some nodes will have more kernelcore than
  5519. * others
  5520. */
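/*
 * For example (hypothetical numbers): with kernelcore=2G on a machine with
 * four nodes that all have memory, roughly 512M of each node is kept for
 * kernel allocations and the remainder of each node becomes ZONE_MOVABLE.
 */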
  5521. static void __init find_zone_movable_pfns_for_nodes(void)
  5522. {
  5523. int i, nid;
  5524. unsigned long usable_startpfn;
  5525. unsigned long kernelcore_node, kernelcore_remaining;
/* save the state before borrowing the nodemask */
  5527. nodemask_t saved_node_state = node_states[N_MEMORY];
  5528. unsigned long totalpages = early_calculate_totalpages();
  5529. int usable_nodes = nodes_weight(node_states[N_MEMORY]);
  5530. struct memblock_region *r;
  5531. /* Need to find movable_zone earlier when movable_node is specified. */
  5532. find_usable_zone_for_movable();
  5533. /*
  5534. * If movable_node is specified, ignore kernelcore and movablecore
  5535. * options.
  5536. */
  5537. if (movable_node_is_enabled()) {
  5538. for_each_memblock(memory, r) {
  5539. if (!memblock_is_hotpluggable(r))
  5540. continue;
  5541. nid = r->nid;
  5542. usable_startpfn = PFN_DOWN(r->base);
  5543. zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
  5544. min(usable_startpfn, zone_movable_pfn[nid]) :
  5545. usable_startpfn;
  5546. }
  5547. goto out2;
  5548. }
  5549. /*
  5550. * If kernelcore=mirror is specified, ignore movablecore option
  5551. */
  5552. if (mirrored_kernelcore) {
  5553. bool mem_below_4gb_not_mirrored = false;
  5554. for_each_memblock(memory, r) {
  5555. if (memblock_is_mirror(r))
  5556. continue;
  5557. nid = r->nid;
  5558. usable_startpfn = memblock_region_memory_base_pfn(r);
  5559. if (usable_startpfn < 0x100000) {
  5560. mem_below_4gb_not_mirrored = true;
  5561. continue;
  5562. }
  5563. zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
  5564. min(usable_startpfn, zone_movable_pfn[nid]) :
  5565. usable_startpfn;
  5566. }
  5567. if (mem_below_4gb_not_mirrored)
pr_warn("This configuration results in unmirrored kernel memory.\n");
  5569. goto out2;
  5570. }
  5571. /*
  5572. * If movablecore=nn[KMG] was specified, calculate what size of
  5573. * kernelcore that corresponds so that memory usable for
  5574. * any allocation type is evenly spread. If both kernelcore
  5575. * and movablecore are specified, then the value of kernelcore
  5576. * will be used for required_kernelcore if it's greater than
  5577. * what movablecore would have allowed.
  5578. */
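/*
 * Illustration (made-up sizes): on a 16GiB machine, movablecore=4G leaves
 * corepages of roughly 12GiB, so required_kernelcore becomes 12GiB unless
 * an explicit kernelcore= value was even larger.
 */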
  5579. if (required_movablecore) {
  5580. unsigned long corepages;
  5581. /*
  5582. * Round-up so that ZONE_MOVABLE is at least as large as what
  5583. * was requested by the user
  5584. */
  5585. required_movablecore =
  5586. roundup(required_movablecore, MAX_ORDER_NR_PAGES);
  5587. required_movablecore = min(totalpages, required_movablecore);
  5588. corepages = totalpages - required_movablecore;
  5589. required_kernelcore = max(required_kernelcore, corepages);
  5590. }
  5591. /*
  5592. * If kernelcore was not specified or kernelcore size is larger
  5593. * than totalpages, there is no ZONE_MOVABLE.
  5594. */
  5595. if (!required_kernelcore || required_kernelcore >= totalpages)
  5596. goto out;
  5597. /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
  5598. usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
  5599. restart:
  5600. /* Spread kernelcore memory as evenly as possible throughout nodes */
  5601. kernelcore_node = required_kernelcore / usable_nodes;
  5602. for_each_node_state(nid, N_MEMORY) {
  5603. unsigned long start_pfn, end_pfn;
  5604. /*
  5605. * Recalculate kernelcore_node if the division per node
  5606. * now exceeds what is necessary to satisfy the requested
  5607. * amount of memory for the kernel
  5608. */
  5609. if (required_kernelcore < kernelcore_node)
  5610. kernelcore_node = required_kernelcore / usable_nodes;
  5611. /*
  5612. * As the map is walked, we track how much memory is usable
  5613. * by the kernel using kernelcore_remaining. When it is
  5614. * 0, the rest of the node is usable by ZONE_MOVABLE
  5615. */
  5616. kernelcore_remaining = kernelcore_node;
  5617. /* Go through each range of PFNs within this node */
  5618. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
  5619. unsigned long size_pages;
  5620. start_pfn = max(start_pfn, zone_movable_pfn[nid]);
  5621. if (start_pfn >= end_pfn)
  5622. continue;
  5623. /* Account for what is only usable for kernelcore */
  5624. if (start_pfn < usable_startpfn) {
  5625. unsigned long kernel_pages;
  5626. kernel_pages = min(end_pfn, usable_startpfn)
  5627. - start_pfn;
  5628. kernelcore_remaining -= min(kernel_pages,
  5629. kernelcore_remaining);
  5630. required_kernelcore -= min(kernel_pages,
  5631. required_kernelcore);
  5632. /* Continue if range is now fully accounted */
  5633. if (end_pfn <= usable_startpfn) {
  5634. /*
  5635. * Push zone_movable_pfn to the end so
  5636. * that if we have to rebalance
  5637. * kernelcore across nodes, we will
  5638. * not double account here
  5639. */
  5640. zone_movable_pfn[nid] = end_pfn;
  5641. continue;
  5642. }
  5643. start_pfn = usable_startpfn;
  5644. }
  5645. /*
  5646. * The usable PFN range for ZONE_MOVABLE is from
  5647. * start_pfn->end_pfn. Calculate size_pages as the
  5648. * number of pages used as kernelcore
  5649. */
  5650. size_pages = end_pfn - start_pfn;
  5651. if (size_pages > kernelcore_remaining)
  5652. size_pages = kernelcore_remaining;
  5653. zone_movable_pfn[nid] = start_pfn + size_pages;
  5654. /*
  5655. * Some kernelcore has been met, update counts and
  5656. * break if the kernelcore for this node has been
  5657. * satisfied
  5658. */
  5659. required_kernelcore -= min(required_kernelcore,
  5660. size_pages);
  5661. kernelcore_remaining -= size_pages;
  5662. if (!kernelcore_remaining)
  5663. break;
  5664. }
  5665. }
  5666. /*
  5667. * If there is still required_kernelcore, we do another pass with one
  5668. * less node in the count. This will push zone_movable_pfn[nid] further
  5669. * along on the nodes that still have memory until kernelcore is
  5670. * satisfied
  5671. */
  5672. usable_nodes--;
  5673. if (usable_nodes && required_kernelcore > usable_nodes)
  5674. goto restart;
  5675. out2:
  5676. /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
  5677. for (nid = 0; nid < MAX_NUMNODES; nid++)
  5678. zone_movable_pfn[nid] =
  5679. roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
  5680. out:
  5681. /* restore the node_state */
  5682. node_states[N_MEMORY] = saved_node_state;
  5683. }
/* Is there any regular or high memory on that node? */
  5685. static void check_for_memory(pg_data_t *pgdat, int nid)
  5686. {
  5687. enum zone_type zone_type;
  5688. if (N_MEMORY == N_NORMAL_MEMORY)
  5689. return;
  5690. for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
  5691. struct zone *zone = &pgdat->node_zones[zone_type];
  5692. if (populated_zone(zone)) {
  5693. node_set_state(nid, N_HIGH_MEMORY);
  5694. if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
  5695. zone_type <= ZONE_NORMAL)
  5696. node_set_state(nid, N_NORMAL_MEMORY);
  5697. break;
  5698. }
  5699. }
  5700. }
  5701. /**
  5702. * free_area_init_nodes - Initialise all pg_data_t and zone data
  5703. * @max_zone_pfn: an array of max PFNs for each zone
  5704. *
  5705. * This will call free_area_init_node() for each active node in the system.
  5706. * Using the page ranges provided by memblock_set_node(), the size of each
* zone in each node and their holes are calculated. If the maximum PFNs of
* two adjacent zones match, the higher zone is assumed to be empty.
  5709. * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
  5710. * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
  5711. * starts where the previous one ended. For example, ZONE_DMA32 starts
  5712. * at arch_max_dma_pfn.
  5713. */
  5714. void __init free_area_init_nodes(unsigned long *max_zone_pfn)
  5715. {
  5716. unsigned long start_pfn, end_pfn;
  5717. int i, nid;
  5718. /* Record where the zone boundaries are */
  5719. memset(arch_zone_lowest_possible_pfn, 0,
  5720. sizeof(arch_zone_lowest_possible_pfn));
  5721. memset(arch_zone_highest_possible_pfn, 0,
  5722. sizeof(arch_zone_highest_possible_pfn));
  5723. start_pfn = find_min_pfn_with_active_regions();
  5724. for (i = 0; i < MAX_NR_ZONES; i++) {
  5725. if (i == ZONE_MOVABLE)
  5726. continue;
  5727. end_pfn = max(max_zone_pfn[i], start_pfn);
  5728. arch_zone_lowest_possible_pfn[i] = start_pfn;
  5729. arch_zone_highest_possible_pfn[i] = end_pfn;
  5730. start_pfn = end_pfn;
  5731. }
  5732. /* Find the PFNs that ZONE_MOVABLE begins at in each node */
  5733. memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
  5734. find_zone_movable_pfns_for_nodes();
  5735. /* Print out the zone ranges */
  5736. pr_info("Zone ranges:\n");
  5737. for (i = 0; i < MAX_NR_ZONES; i++) {
  5738. if (i == ZONE_MOVABLE)
  5739. continue;
  5740. pr_info(" %-8s ", zone_names[i]);
  5741. if (arch_zone_lowest_possible_pfn[i] ==
  5742. arch_zone_highest_possible_pfn[i])
  5743. pr_cont("empty\n");
  5744. else
  5745. pr_cont("[mem %#018Lx-%#018Lx]\n",
  5746. (u64)arch_zone_lowest_possible_pfn[i]
  5747. << PAGE_SHIFT,
  5748. ((u64)arch_zone_highest_possible_pfn[i]
  5749. << PAGE_SHIFT) - 1);
  5750. }
  5751. /* Print out the PFNs ZONE_MOVABLE begins at in each node */
  5752. pr_info("Movable zone start for each node\n");
  5753. for (i = 0; i < MAX_NUMNODES; i++) {
  5754. if (zone_movable_pfn[i])
  5755. pr_info(" Node %d: %#018Lx\n", i,
  5756. (u64)zone_movable_pfn[i] << PAGE_SHIFT);
  5757. }
  5758. /* Print out the early node map */
  5759. pr_info("Early memory node ranges\n");
  5760. for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
  5761. pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
  5762. (u64)start_pfn << PAGE_SHIFT,
  5763. ((u64)end_pfn << PAGE_SHIFT) - 1);
  5764. /* Initialise every node */
  5765. mminit_verify_pageflags_layout();
  5766. setup_nr_node_ids();
  5767. for_each_online_node(nid) {
  5768. pg_data_t *pgdat = NODE_DATA(nid);
  5769. free_area_init_node(nid, NULL,
  5770. find_min_pfn_for_node(nid), NULL);
  5771. /* Any memory on that node */
  5772. if (pgdat->node_present_pages)
  5773. node_set_state(nid, N_MEMORY);
  5774. check_for_memory(pgdat, nid);
  5775. }
  5776. zero_resv_unavail();
  5777. }
  5778. static int __init cmdline_parse_core(char *p, unsigned long *core)
  5779. {
  5780. unsigned long long coremem;
  5781. if (!p)
  5782. return -EINVAL;
  5783. coremem = memparse(p, &p);
  5784. *core = coremem >> PAGE_SHIFT;
  5785. /* Paranoid check that UL is enough for the coremem value */
  5786. WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
  5787. return 0;
  5788. }
  5789. /*
  5790. * kernelcore=size sets the amount of memory for use for allocations that
  5791. * cannot be reclaimed or migrated.
  5792. */
  5793. static int __init cmdline_parse_kernelcore(char *p)
  5794. {
  5795. /* parse kernelcore=mirror */
  5796. if (parse_option_str(p, "mirror")) {
  5797. mirrored_kernelcore = true;
  5798. return 0;
  5799. }
  5800. return cmdline_parse_core(p, &required_kernelcore);
  5801. }
  5802. /*
  5803. * movablecore=size sets the amount of memory for use for allocations that
  5804. * can be reclaimed or migrated.
  5805. */
  5806. static int __init cmdline_parse_movablecore(char *p)
  5807. {
  5808. return cmdline_parse_core(p, &required_movablecore);
  5809. }
  5810. early_param("kernelcore", cmdline_parse_kernelcore);
  5811. early_param("movablecore", cmdline_parse_movablecore);
  5812. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  5813. void adjust_managed_page_count(struct page *page, long count)
  5814. {
  5815. spin_lock(&managed_page_count_lock);
  5816. page_zone(page)->managed_pages += count;
  5817. totalram_pages += count;
  5818. #ifdef CONFIG_HIGHMEM
  5819. if (PageHighMem(page))
  5820. totalhigh_pages += count;
  5821. #endif
  5822. spin_unlock(&managed_page_count_lock);
  5823. }
  5824. EXPORT_SYMBOL(adjust_managed_page_count);
  5825. unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
  5826. {
  5827. void *pos;
  5828. unsigned long pages = 0;
  5829. start = (void *)PAGE_ALIGN((unsigned long)start);
  5830. end = (void *)((unsigned long)end & PAGE_MASK);
  5831. for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
  5832. if ((unsigned int)poison <= 0xFF)
  5833. memset(pos, poison, PAGE_SIZE);
  5834. free_reserved_page(virt_to_page(pos));
  5835. }
  5836. if (pages && s)
  5837. pr_info("Freeing %s memory: %ldK\n",
  5838. s, pages << (PAGE_SHIFT - 10));
  5839. return pages;
  5840. }
  5841. EXPORT_SYMBOL(free_reserved_area);
  5842. #ifdef CONFIG_HIGHMEM
  5843. void free_highmem_page(struct page *page)
  5844. {
  5845. __free_reserved_page(page);
  5846. totalram_pages++;
  5847. page_zone(page)->managed_pages++;
  5848. totalhigh_pages++;
  5849. }
  5850. #endif
  5851. void __init mem_init_print_info(const char *str)
  5852. {
  5853. unsigned long physpages, codesize, datasize, rosize, bss_size;
  5854. unsigned long init_code_size, init_data_size;
  5855. physpages = get_num_physpages();
  5856. codesize = _etext - _stext;
  5857. datasize = _edata - _sdata;
  5858. rosize = __end_rodata - __start_rodata;
  5859. bss_size = __bss_stop - __bss_start;
  5860. init_data_size = __init_end - __init_begin;
  5861. init_code_size = _einittext - _sinittext;
  5862. /*
  5863. * Detect special cases and adjust section sizes accordingly:
  5864. * 1) .init.* may be embedded into .data sections
  5865. * 2) .init.text.* may be out of [__init_begin, __init_end],
  5866. * please refer to arch/tile/kernel/vmlinux.lds.S.
  5867. * 3) .rodata.* may be embedded into .text or .data sections.
  5868. */
  5869. #define adj_init_size(start, end, size, pos, adj) \
  5870. do { \
  5871. if (start <= pos && pos < end && size > adj) \
  5872. size -= adj; \
  5873. } while (0)
  5874. adj_init_size(__init_begin, __init_end, init_data_size,
  5875. _sinittext, init_code_size);
  5876. adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
  5877. adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
  5878. adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
  5879. adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
  5880. #undef adj_init_size
  5881. pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
  5882. #ifdef CONFIG_HIGHMEM
  5883. ", %luK highmem"
  5884. #endif
  5885. "%s%s)\n",
  5886. nr_free_pages() << (PAGE_SHIFT - 10),
  5887. physpages << (PAGE_SHIFT - 10),
  5888. codesize >> 10, datasize >> 10, rosize >> 10,
  5889. (init_data_size + init_code_size) >> 10, bss_size >> 10,
  5890. (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
  5891. totalcma_pages << (PAGE_SHIFT - 10),
  5892. #ifdef CONFIG_HIGHMEM
  5893. totalhigh_pages << (PAGE_SHIFT - 10),
  5894. #endif
  5895. str ? ", " : "", str ? str : "");
  5896. }
  5897. /**
  5898. * set_dma_reserve - set the specified number of pages reserved in the first zone
  5899. * @new_dma_reserve: The number of pages to mark reserved
  5900. *
  5901. * The per-cpu batchsize and zone watermarks are determined by managed_pages.
  5902. * In the DMA zone, a significant percentage may be consumed by kernel image
  5903. * and other unfreeable allocations which can skew the watermarks badly. This
  5904. * function may optionally be used to account for unfreeable pages in the
  5905. * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
  5906. * smaller per-cpu batchsize.
  5907. */
  5908. void __init set_dma_reserve(unsigned long new_dma_reserve)
  5909. {
  5910. dma_reserve = new_dma_reserve;
  5911. }
  5912. void __init free_area_init(unsigned long *zones_size)
  5913. {
  5914. free_area_init_node(0, zones_size,
  5915. __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
  5916. zero_resv_unavail();
  5917. }
  5918. static int page_alloc_cpu_dead(unsigned int cpu)
  5919. {
  5920. lru_add_drain_cpu(cpu);
  5921. drain_pages(cpu);
  5922. /*
  5923. * Spill the event counters of the dead processor
  5924. * into the current processors event counters.
  5925. * This artificially elevates the count of the current
  5926. * processor.
  5927. */
  5928. vm_events_fold_cpu(cpu);
  5929. /*
  5930. * Zero the differential counters of the dead processor
  5931. * so that the vm statistics are consistent.
  5932. *
  5933. * This is only okay since the processor is dead and cannot
  5934. * race with what we are doing.
  5935. */
  5936. cpu_vm_stats_fold(cpu);
  5937. return 0;
  5938. }
  5939. void __init page_alloc_init(void)
  5940. {
  5941. int ret;
  5942. ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
  5943. "mm/page_alloc:dead", NULL,
  5944. page_alloc_cpu_dead);
  5945. WARN_ON(ret < 0);
  5946. }
  5947. /*
  5948. * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
  5949. * or min_free_kbytes changes.
  5950. */
  5951. static void calculate_totalreserve_pages(void)
  5952. {
  5953. struct pglist_data *pgdat;
  5954. unsigned long reserve_pages = 0;
  5955. enum zone_type i, j;
  5956. for_each_online_pgdat(pgdat) {
  5957. pgdat->totalreserve_pages = 0;
  5958. for (i = 0; i < MAX_NR_ZONES; i++) {
  5959. struct zone *zone = pgdat->node_zones + i;
  5960. long max = 0;
  5961. /* Find valid and maximum lowmem_reserve in the zone */
  5962. for (j = i; j < MAX_NR_ZONES; j++) {
  5963. if (zone->lowmem_reserve[j] > max)
  5964. max = zone->lowmem_reserve[j];
  5965. }
  5966. /* we treat the high watermark as reserved pages. */
  5967. max += high_wmark_pages(zone);
  5968. if (max > zone->managed_pages)
  5969. max = zone->managed_pages;
  5970. pgdat->totalreserve_pages += max;
  5971. reserve_pages += max;
  5972. }
  5973. }
  5974. totalreserve_pages = reserve_pages;
  5975. }
  5976. /*
  5977. * setup_per_zone_lowmem_reserve - called whenever
  5978. * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
  5979. * has a correct pages reserved value, so an adequate number of
  5980. * pages are left in the zone after a successful __alloc_pages().
  5981. */
  5982. static void setup_per_zone_lowmem_reserve(void)
  5983. {
  5984. struct pglist_data *pgdat;
  5985. enum zone_type j, idx;
  5986. for_each_online_pgdat(pgdat) {
  5987. for (j = 0; j < MAX_NR_ZONES; j++) {
  5988. struct zone *zone = pgdat->node_zones + j;
  5989. unsigned long managed_pages = zone->managed_pages;
  5990. zone->lowmem_reserve[j] = 0;
  5991. idx = j;
  5992. while (idx) {
  5993. struct zone *lower_zone;
  5994. idx--;
  5995. if (sysctl_lowmem_reserve_ratio[idx] < 1)
  5996. sysctl_lowmem_reserve_ratio[idx] = 1;
  5997. lower_zone = pgdat->node_zones + idx;
  5998. lower_zone->lowmem_reserve[j] = managed_pages /
  5999. sysctl_lowmem_reserve_ratio[idx];
  6000. managed_pages += lower_zone->managed_pages;
  6001. }
  6002. }
  6003. }
  6004. /* update totalreserve_pages */
  6005. calculate_totalreserve_pages();
  6006. }
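/*
 * Example with made-up numbers: assuming the default
 * sysctl_lowmem_reserve_ratio of 256 for ZONE_DMA32, a 4GiB ZONE_NORMAL
 * (1048576 managed pages) makes ZONE_DMA32 hold back 4096 pages (16MiB)
 * from allocations that could have been satisfied from ZONE_NORMAL.
 */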
  6007. static void __setup_per_zone_wmarks(void)
  6008. {
  6009. unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
  6010. unsigned long lowmem_pages = 0;
  6011. struct zone *zone;
  6012. unsigned long flags;
  6013. /* Calculate total number of !ZONE_HIGHMEM pages */
  6014. for_each_zone(zone) {
  6015. if (!is_highmem(zone))
  6016. lowmem_pages += zone->managed_pages;
  6017. }
  6018. for_each_zone(zone) {
  6019. u64 tmp;
  6020. spin_lock_irqsave(&zone->lock, flags);
  6021. tmp = (u64)pages_min * zone->managed_pages;
  6022. do_div(tmp, lowmem_pages);
  6023. if (is_highmem(zone)) {
  6024. /*
  6025. * __GFP_HIGH and PF_MEMALLOC allocations usually don't
  6026. * need highmem pages, so cap pages_min to a small
  6027. * value here.
  6028. *
  6029. * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
  6030. * deltas control asynch page reclaim, and so should
  6031. * not be capped for highmem.
  6032. */
  6033. unsigned long min_pages;
  6034. min_pages = zone->managed_pages / 1024;
  6035. min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
  6036. zone->watermark[WMARK_MIN] = min_pages;
  6037. } else {
  6038. /*
  6039. * If it's a lowmem zone, reserve a number of pages
  6040. * proportionate to the zone's size.
  6041. */
  6042. zone->watermark[WMARK_MIN] = tmp;
  6043. }
  6044. /*
  6045. * Set the kswapd watermarks distance according to the
  6046. * scale factor in proportion to available memory, but
  6047. * ensure a minimum size on small systems.
  6048. */
  6049. tmp = max_t(u64, tmp >> 2,
  6050. mult_frac(zone->managed_pages,
  6051. watermark_scale_factor, 10000));
  6052. zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
  6053. zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
  6054. spin_unlock_irqrestore(&zone->lock, flags);
  6055. }
  6056. /* update totalreserve_pages */
  6057. calculate_totalreserve_pages();
  6058. }
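/*
 * Worked example (default tunables assumed, watermark_scale_factor == 10):
 * with min_free_kbytes = 8192 on a machine whose only lowmem zone manages
 * 4GiB (1048576 pages), pages_min = 2048, so WMARK_MIN = 2048,
 * WMARK_LOW = 2048 + max(512, 1048) = 3096 and WMARK_HIGH = 4144 pages.
 */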
  6059. /**
  6060. * setup_per_zone_wmarks - called when min_free_kbytes changes
  6061. * or when memory is hot-{added|removed}
  6062. *
  6063. * Ensures that the watermark[min,low,high] values for each zone are set
  6064. * correctly with respect to min_free_kbytes.
  6065. */
  6066. void setup_per_zone_wmarks(void)
  6067. {
  6068. static DEFINE_SPINLOCK(lock);
  6069. spin_lock(&lock);
  6070. __setup_per_zone_wmarks();
  6071. spin_unlock(&lock);
  6072. }
  6073. /*
  6074. * Initialise min_free_kbytes.
  6075. *
  6076. * For small machines we want it small (128k min). For large machines
  6077. * we want it large (64MB max). But it is not linear, because network
  6078. * bandwidth does not increase linearly with machine size. We use
  6079. *
  6080. * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
  6081. * min_free_kbytes = sqrt(lowmem_kbytes * 16)
  6082. *
  6083. * which yields
  6084. *
  6085. * 16MB: 512k
  6086. * 32MB: 724k
  6087. * 64MB: 1024k
  6088. * 128MB: 1448k
  6089. * 256MB: 2048k
  6090. * 512MB: 2896k
  6091. * 1024MB: 4096k
  6092. * 2048MB: 5792k
  6093. * 4096MB: 8192k
  6094. * 8192MB: 11584k
  6095. * 16384MB: 16384k
  6096. */
  6097. int __meminit init_per_zone_wmark_min(void)
  6098. {
  6099. unsigned long lowmem_kbytes;
  6100. int new_min_free_kbytes;
  6101. lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
  6102. new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
  6103. if (new_min_free_kbytes > user_min_free_kbytes) {
  6104. min_free_kbytes = new_min_free_kbytes;
  6105. if (min_free_kbytes < 128)
  6106. min_free_kbytes = 128;
  6107. if (min_free_kbytes > 65536)
  6108. min_free_kbytes = 65536;
  6109. } else {
  6110. pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
  6111. new_min_free_kbytes, user_min_free_kbytes);
  6112. }
  6113. setup_per_zone_wmarks();
  6114. refresh_zone_stat_thresholds();
  6115. setup_per_zone_lowmem_reserve();
  6116. #ifdef CONFIG_NUMA
  6117. setup_min_unmapped_ratio();
  6118. setup_min_slab_ratio();
  6119. #endif
  6120. return 0;
  6121. }
  6122. core_initcall(init_per_zone_wmark_min)
  6123. /*
  6124. * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
  6125. * that we can call two helper functions whenever min_free_kbytes
  6126. * changes.
  6127. */
  6128. int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
  6129. void __user *buffer, size_t *length, loff_t *ppos)
  6130. {
  6131. int rc;
  6132. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  6133. if (rc)
  6134. return rc;
  6135. if (write) {
  6136. user_min_free_kbytes = min_free_kbytes;
  6137. setup_per_zone_wmarks();
  6138. }
  6139. return 0;
  6140. }
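/*
 * For example, an administrator can raise the reserve at runtime with
 * something like "sysctl -w vm.min_free_kbytes=65536"; the handler above
 * then records the user value and recomputes the per-zone watermarks.
 */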
  6141. int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
  6142. void __user *buffer, size_t *length, loff_t *ppos)
  6143. {
  6144. int rc;
  6145. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  6146. if (rc)
  6147. return rc;
  6148. if (write)
  6149. setup_per_zone_wmarks();
  6150. return 0;
  6151. }
  6152. #ifdef CONFIG_NUMA
  6153. static void setup_min_unmapped_ratio(void)
  6154. {
  6155. pg_data_t *pgdat;
  6156. struct zone *zone;
  6157. for_each_online_pgdat(pgdat)
  6158. pgdat->min_unmapped_pages = 0;
  6159. for_each_zone(zone)
  6160. zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
  6161. sysctl_min_unmapped_ratio) / 100;
  6162. }
  6163. int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
  6164. void __user *buffer, size_t *length, loff_t *ppos)
  6165. {
  6166. int rc;
  6167. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  6168. if (rc)
  6169. return rc;
  6170. setup_min_unmapped_ratio();
  6171. return 0;
  6172. }
  6173. static void setup_min_slab_ratio(void)
  6174. {
  6175. pg_data_t *pgdat;
  6176. struct zone *zone;
  6177. for_each_online_pgdat(pgdat)
  6178. pgdat->min_slab_pages = 0;
  6179. for_each_zone(zone)
  6180. zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
  6181. sysctl_min_slab_ratio) / 100;
  6182. }
  6183. int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
  6184. void __user *buffer, size_t *length, loff_t *ppos)
  6185. {
  6186. int rc;
  6187. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  6188. if (rc)
  6189. return rc;
  6190. setup_min_slab_ratio();
  6191. return 0;
  6192. }
  6193. #endif
  6194. /*
  6195. * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
  6196. * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
  6197. * whenever sysctl_lowmem_reserve_ratio changes.
  6198. *
* The reserve ratio has no relation to the minimum watermarks. The lowmem
* reserve ratio is only meaningful as a function of the boot-time zone
* sizes.
  6202. */
  6203. int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
  6204. void __user *buffer, size_t *length, loff_t *ppos)
  6205. {
  6206. proc_dointvec_minmax(table, write, buffer, length, ppos);
  6207. setup_per_zone_lowmem_reserve();
  6208. return 0;
  6209. }
  6210. /*
  6211. * percpu_pagelist_fraction - changes the pcp->high for each zone on each
  6212. * cpu. It is the fraction of total pages in each zone that a hot per cpu
  6213. * pagelist can have before it gets flushed back to buddy allocator.
  6214. */
  6215. int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
  6216. void __user *buffer, size_t *length, loff_t *ppos)
  6217. {
  6218. struct zone *zone;
  6219. int old_percpu_pagelist_fraction;
  6220. int ret;
  6221. mutex_lock(&pcp_batch_high_lock);
  6222. old_percpu_pagelist_fraction = percpu_pagelist_fraction;
  6223. ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  6224. if (!write || ret < 0)
  6225. goto out;
  6226. /* Sanity checking to avoid pcp imbalance */
  6227. if (percpu_pagelist_fraction &&
  6228. percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
  6229. percpu_pagelist_fraction = old_percpu_pagelist_fraction;
  6230. ret = -EINVAL;
  6231. goto out;
  6232. }
  6233. /* No change? */
  6234. if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
  6235. goto out;
  6236. for_each_populated_zone(zone) {
  6237. unsigned int cpu;
  6238. for_each_possible_cpu(cpu)
  6239. pageset_set_high_and_batch(zone,
  6240. per_cpu_ptr(zone->pageset, cpu));
  6241. }
  6242. out:
  6243. mutex_unlock(&pcp_batch_high_lock);
  6244. return ret;
  6245. }
  6246. #ifdef CONFIG_NUMA
  6247. int hashdist = HASHDIST_DEFAULT;
  6248. static int __init set_hashdist(char *str)
  6249. {
  6250. if (!str)
  6251. return 0;
  6252. hashdist = simple_strtoul(str, &str, 0);
  6253. return 1;
  6254. }
  6255. __setup("hashdist=", set_hashdist);
  6256. #endif
  6257. #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
  6258. /*
  6259. * Returns the number of pages that arch has reserved but
  6260. * is not known to alloc_large_system_hash().
  6261. */
  6262. static unsigned long __init arch_reserved_kernel_pages(void)
  6263. {
  6264. return 0;
  6265. }
  6266. #endif
  6267. /*
  6268. * Adaptive scale is meant to reduce sizes of hash tables on large memory
  6269. * machines. As memory size is increased the scale is also increased but at
  6270. * slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
  6271. * quadruples the scale is increased by one, which means the size of hash table
  6272. * only doubles, instead of quadrupling as well.
* Because 32-bit systems cannot have the large amounts of physical memory
* where this scaling makes sense, it is disabled on such platforms.
  6275. */
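/*
 * Concretely: with ADAPT_SCALE_BASE = 64G, a 256GiB machine bumps the scale
 * by one and a 1TiB machine by two, so the resulting hash tables end up
 * only 2x and 4x larger than on a 64GiB machine rather than 4x and 16x.
 */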
  6276. #if __BITS_PER_LONG > 32
  6277. #define ADAPT_SCALE_BASE (64ul << 30)
  6278. #define ADAPT_SCALE_SHIFT 2
  6279. #define ADAPT_SCALE_NPAGES (ADAPT_SCALE_BASE >> PAGE_SHIFT)
  6280. #endif
  6281. /*
  6282. * allocate a large system hash table from bootmem
  6283. * - it is assumed that the hash table must contain an exact power-of-2
  6284. * quantity of entries
  6285. * - limit is the number of hash buckets, not the total allocation size
  6286. */
  6287. void *__init alloc_large_system_hash(const char *tablename,
  6288. unsigned long bucketsize,
  6289. unsigned long numentries,
  6290. int scale,
  6291. int flags,
  6292. unsigned int *_hash_shift,
  6293. unsigned int *_hash_mask,
  6294. unsigned long low_limit,
  6295. unsigned long high_limit)
  6296. {
  6297. unsigned long long max = high_limit;
  6298. unsigned long log2qty, size;
  6299. void *table = NULL;
  6300. gfp_t gfp_flags;
  6301. /* allow the kernel cmdline to have a say */
  6302. if (!numentries) {
  6303. /* round applicable memory size up to nearest megabyte */
  6304. numentries = nr_kernel_pages;
  6305. numentries -= arch_reserved_kernel_pages();
  6306. /* It isn't necessary when PAGE_SIZE >= 1MB */
  6307. if (PAGE_SHIFT < 20)
  6308. numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
  6309. #if __BITS_PER_LONG > 32
  6310. if (!high_limit) {
  6311. unsigned long adapt;
  6312. for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
  6313. adapt <<= ADAPT_SCALE_SHIFT)
  6314. scale++;
  6315. }
  6316. #endif
  6317. /* limit to 1 bucket per 2^scale bytes of low memory */
  6318. if (scale > PAGE_SHIFT)
  6319. numentries >>= (scale - PAGE_SHIFT);
  6320. else
  6321. numentries <<= (PAGE_SHIFT - scale);
  6322. /* Make sure we've got at least a 0-order allocation.. */
  6323. if (unlikely(flags & HASH_SMALL)) {
  6324. /* Makes no sense without HASH_EARLY */
  6325. WARN_ON(!(flags & HASH_EARLY));
  6326. if (!(numentries >> *_hash_shift)) {
  6327. numentries = 1UL << *_hash_shift;
  6328. BUG_ON(!numentries);
  6329. }
  6330. } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
  6331. numentries = PAGE_SIZE / bucketsize;
  6332. }
  6333. numentries = roundup_pow_of_two(numentries);
  6334. /* limit allocation size to 1/16 total memory by default */
  6335. if (max == 0) {
  6336. max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
  6337. do_div(max, bucketsize);
  6338. }
  6339. max = min(max, 0x80000000ULL);
  6340. if (numentries < low_limit)
  6341. numentries = low_limit;
  6342. if (numentries > max)
  6343. numentries = max;
  6344. log2qty = ilog2(numentries);
  6345. gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
  6346. do {
  6347. size = bucketsize << log2qty;
  6348. if (flags & HASH_EARLY) {
  6349. if (flags & HASH_ZERO)
  6350. table = memblock_virt_alloc_nopanic(size, 0);
  6351. else
  6352. table = memblock_virt_alloc_raw(size, 0);
  6353. } else if (hashdist) {
  6354. table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
  6355. } else {
  6356. /*
* If bucketsize is not a power of two, we may free
* some pages at the end of the hash table, which
* alloc_pages_exact() does automatically.
  6360. */
  6361. if (get_order(size) < MAX_ORDER) {
  6362. table = alloc_pages_exact(size, gfp_flags);
  6363. kmemleak_alloc(table, size, 1, gfp_flags);
  6364. }
  6365. }
  6366. } while (!table && size > PAGE_SIZE && --log2qty);
  6367. if (!table)
  6368. panic("Failed to allocate %s hash table\n", tablename);
  6369. pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
  6370. tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
  6371. if (_hash_shift)
  6372. *_hash_shift = log2qty;
  6373. if (_hash_mask)
  6374. *_hash_mask = (1 << log2qty) - 1;
  6375. return table;
  6376. }
  6377. /*
  6378. * This function checks whether pageblock includes unmovable pages or not.
* If @count is not zero, up to @count unmovable pages are tolerated.
  6380. *
  6381. * PageLRU check without isolation or lru_lock could race so that
  6382. * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
  6383. * check without lock_page also may miss some movable non-lru pages at
  6384. * race condition. So you can't expect this function should be exact.
  6385. */
  6386. bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
  6387. int migratetype,
  6388. bool skip_hwpoisoned_pages)
  6389. {
  6390. unsigned long pfn, iter, found;
  6391. /*
* To avoid noisy results, lru_add_drain_all() should be called beforehand.
* A ZONE_MOVABLE zone never contains unmovable pages.
  6394. */
  6395. if (zone_idx(zone) == ZONE_MOVABLE)
  6396. return false;
  6397. /*
  6398. * CMA allocations (alloc_contig_range) really need to mark isolate
  6399. * CMA pageblocks even when they are not movable in fact so consider
  6400. * them movable here.
  6401. */
  6402. if (is_migrate_cma(migratetype) &&
  6403. is_migrate_cma(get_pageblock_migratetype(page)))
  6404. return false;
  6405. pfn = page_to_pfn(page);
  6406. for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
  6407. unsigned long check = pfn + iter;
  6408. if (!pfn_valid_within(check))
  6409. continue;
  6410. page = pfn_to_page(check);
  6411. if (PageReserved(page))
  6412. return true;
  6413. /*
  6414. * Hugepages are not in LRU lists, but they're movable.
* We need not scan over tail pages because we don't
  6416. * handle each tail page individually in migration.
  6417. */
  6418. if (PageHuge(page)) {
  6419. iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
  6420. continue;
  6421. }
  6422. /*
* We can't use page_count without pinning the page
  6424. * because another CPU can free compound page.
  6425. * This check already skips compound tails of THP
  6426. * because their page->_refcount is zero at all time.
  6427. */
  6428. if (!page_ref_count(page)) {
  6429. if (PageBuddy(page))
  6430. iter += (1 << page_order(page)) - 1;
  6431. continue;
  6432. }
  6433. /*
  6434. * The HWPoisoned page may be not in buddy system, and
  6435. * page_count() is not 0.
  6436. */
  6437. if (skip_hwpoisoned_pages && PageHWPoison(page))
  6438. continue;
  6439. if (__PageMovable(page))
  6440. continue;
  6441. if (!PageLRU(page))
  6442. found++;
  6443. /*
* If there are RECLAIMABLE pages, we need to check them too.
* However, memory offlining itself doesn't currently call
* shrink_node_slabs(); this still needs to be fixed.
  6447. */
  6448. /*
* If the page is not RAM, page_count() should be 0 and
* no further check is needed. This is a _used_, non-movable page.
  6451. *
  6452. * The problematic thing here is PG_reserved pages. PG_reserved
  6453. * is set to both of a memory hole page and a _used_ kernel
  6454. * page at boot.
  6455. */
  6456. if (found > count)
  6457. return true;
  6458. }
  6459. return false;
  6460. }
  6461. bool is_pageblock_removable_nolock(struct page *page)
  6462. {
  6463. struct zone *zone;
  6464. unsigned long pfn;
  6465. /*
  6466. * We have to be careful here because we are iterating over memory
  6467. * sections which are not zone aware so we might end up outside of
  6468. * the zone but still within the section.
  6469. * We have to take care about the node as well. If the node is offline
  6470. * its NODE_DATA will be NULL - see page_zone.
  6471. */
  6472. if (!node_online(page_to_nid(page)))
  6473. return false;
  6474. zone = page_zone(page);
  6475. pfn = page_to_pfn(page);
  6476. if (!zone_spans_pfn(zone, pfn))
  6477. return false;
  6478. return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true);
  6479. }
  6480. #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
  6481. static unsigned long pfn_max_align_down(unsigned long pfn)
  6482. {
  6483. return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
  6484. pageblock_nr_pages) - 1);
  6485. }
  6486. static unsigned long pfn_max_align_up(unsigned long pfn)
  6487. {
  6488. return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
  6489. pageblock_nr_pages));
  6490. }
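/*
 * E.g. assuming MAX_ORDER_NR_PAGES == 1024 and pageblock_nr_pages == 512
 * (typical x86_64 values), pfn_max_align_down(5000) == 4096 and
 * pfn_max_align_up(5000) == 5120.
 */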
  6491. /* [start, end) must belong to a single zone. */
  6492. static int __alloc_contig_migrate_range(struct compact_control *cc,
  6493. unsigned long start, unsigned long end)
  6494. {
  6495. /* This function is based on compact_zone() from compaction.c. */
  6496. unsigned long nr_reclaimed;
  6497. unsigned long pfn = start;
  6498. unsigned int tries = 0;
  6499. int ret = 0;
  6500. migrate_prep();
  6501. while (pfn < end || !list_empty(&cc->migratepages)) {
  6502. if (fatal_signal_pending(current)) {
  6503. ret = -EINTR;
  6504. break;
  6505. }
  6506. if (list_empty(&cc->migratepages)) {
  6507. cc->nr_migratepages = 0;
  6508. pfn = isolate_migratepages_range(cc, pfn, end);
  6509. if (!pfn) {
  6510. ret = -EINTR;
  6511. break;
  6512. }
  6513. tries = 0;
  6514. } else if (++tries == 5) {
  6515. ret = ret < 0 ? ret : -EBUSY;
  6516. break;
  6517. }
  6518. nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
  6519. &cc->migratepages);
  6520. cc->nr_migratepages -= nr_reclaimed;
  6521. ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
  6522. NULL, 0, cc->mode, MR_CMA);
  6523. }
  6524. if (ret < 0) {
  6525. putback_movable_pages(&cc->migratepages);
  6526. return ret;
  6527. }
  6528. return 0;
  6529. }
  6530. /**
  6531. * alloc_contig_range() -- tries to allocate given range of pages
  6532. * @start: start PFN to allocate
  6533. * @end: one-past-the-last PFN to allocate
* @migratetype: migratetype of the underlying pageblocks (either
  6535. * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
  6536. * in range must have the same migratetype and it must
  6537. * be either of the two.
  6538. * @gfp_mask: GFP mask to use during compaction
  6539. *
  6540. * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
  6541. * aligned, however it's the caller's responsibility to guarantee that
  6542. * we are the only thread that changes migrate type of pageblocks the
  6543. * pages fall in.
  6544. *
  6545. * The PFN range must belong to a single zone.
  6546. *
  6547. * Returns zero on success or negative error code. On success all
  6548. * pages which PFN is in [start, end) are allocated for the caller and
  6549. * need to be freed with free_contig_range().
  6550. */
  6551. int alloc_contig_range(unsigned long start, unsigned long end,
  6552. unsigned migratetype, gfp_t gfp_mask)
  6553. {
  6554. unsigned long outer_start, outer_end;
  6555. unsigned int order;
  6556. int ret = 0;
  6557. struct compact_control cc = {
  6558. .nr_migratepages = 0,
  6559. .order = -1,
  6560. .zone = page_zone(pfn_to_page(start)),
  6561. .mode = MIGRATE_SYNC,
  6562. .ignore_skip_hint = true,
  6563. .no_set_skip_hint = true,
  6564. .gfp_mask = current_gfp_context(gfp_mask),
  6565. };
  6566. INIT_LIST_HEAD(&cc.migratepages);
  6567. /*
  6568. * What we do here is we mark all pageblocks in range as
  6569. * MIGRATE_ISOLATE. Because pageblock and max order pages may
* have different sizes, and due to the way the page allocator
* works, we align the range to the bigger of the two so
  6572. * that page allocator won't try to merge buddies from
  6573. * different pageblocks and change MIGRATE_ISOLATE to some
  6574. * other migration type.
  6575. *
  6576. * Once the pageblocks are marked as MIGRATE_ISOLATE, we
  6577. * migrate the pages from an unaligned range (ie. pages that
  6578. * we are interested in). This will put all the pages in
  6579. * range back to page allocator as MIGRATE_ISOLATE.
  6580. *
  6581. * When this is done, we take the pages in range from page
  6582. * allocator removing them from the buddy system. This way
  6583. * page allocator will never consider using them.
  6584. *
  6585. * This lets us mark the pageblocks back as
  6586. * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
  6587. * aligned range but not in the unaligned, original range are
  6588. * put back to page allocator so that buddy can use them.
  6589. */
  6590. ret = start_isolate_page_range(pfn_max_align_down(start),
  6591. pfn_max_align_up(end), migratetype,
  6592. false);
  6593. if (ret)
  6594. return ret;
  6595. /*
  6596. * In case of -EBUSY, we'd like to know which page causes problem.
  6597. * So, just fall through. test_pages_isolated() has a tracepoint
  6598. * which will report the busy page.
  6599. *
  6600. * It is possible that busy pages could become available before
  6601. * the call to test_pages_isolated, and the range will actually be
  6602. * allocated. So, if we fall through be sure to clear ret so that
  6603. * -EBUSY is not accidentally used or returned to caller.
  6604. */
  6605. ret = __alloc_contig_migrate_range(&cc, start, end);
  6606. if (ret && ret != -EBUSY)
  6607. goto done;
  6608. ret =0;
  6609. /*
  6610. * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
  6611. * aligned blocks that are marked as MIGRATE_ISOLATE. What's
  6612. * more, all pages in [start, end) are free in page allocator.
  6613. * What we are going to do is to allocate all pages from
  6614. * [start, end) (that is remove them from page allocator).
  6615. *
  6616. * The only problem is that pages at the beginning and at the
  6617. * end of interesting range may be not aligned with pages that
  6618. * page allocator holds, ie. they can be part of higher order
  6619. * pages. Because of this, we reserve the bigger range and
  6620. * once this is done free the pages we are not interested in.
  6621. *
  6622. * We don't have to hold zone->lock here because the pages are
  6623. * isolated thus they won't get removed from buddy.
  6624. */
  6625. lru_add_drain_all();
  6626. drain_all_pages(cc.zone);
  6627. order = 0;
  6628. outer_start = start;
  6629. while (!PageBuddy(pfn_to_page(outer_start))) {
  6630. if (++order >= MAX_ORDER) {
  6631. outer_start = start;
  6632. break;
  6633. }
  6634. outer_start &= ~0UL << order;
  6635. }
  6636. if (outer_start != start) {
  6637. order = page_order(pfn_to_page(outer_start));
  6638. /*
  6639. * outer_start page could be small order buddy page and
  6640. * it doesn't include start page. Adjust outer_start
  6641. * in this case to report failed page properly
  6642. * on tracepoint in test_pages_isolated()
  6643. */
  6644. if (outer_start + (1UL << order) <= start)
  6645. outer_start = start;
  6646. }
  6647. /* Make sure the range is really isolated. */
  6648. if (test_pages_isolated(outer_start, end, false)) {
  6649. pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
  6650. __func__, outer_start, end);
  6651. ret = -EBUSY;
  6652. goto done;
  6653. }
  6654. /* Grab isolated pages from freelists. */
  6655. outer_end = isolate_freepages_range(&cc, outer_start, end);
  6656. if (!outer_end) {
  6657. ret = -EBUSY;
  6658. goto done;
  6659. }
  6660. /* Free head and tail (if any) */
  6661. if (start != outer_start)
  6662. free_contig_range(outer_start, start - outer_start);
  6663. if (end != outer_end)
  6664. free_contig_range(end, outer_end - end);
  6665. done:
  6666. undo_isolate_page_range(pfn_max_align_down(start),
  6667. pfn_max_align_up(end), migratetype);
  6668. return ret;
  6669. }

void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
	unsigned int count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%d pages are still in use!\n", count);
}
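
/*
 * Editor's note -- illustrative usage sketch, not part of the kernel
 * source.  A hypothetical in-kernel caller could grab nr_pages physically
 * contiguous pages starting at start_pfn and later return them:
 *
 *	int ret;
 *
 *	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
 *				 MIGRATE_MOVABLE, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	... use pfn_to_page(start_pfn) .. pfn_to_page(start_pfn + nr_pages - 1) ...
 *	free_contig_range(start_pfn, nr_pages);
 *
 * The names start_pfn and nr_pages are made up for illustration.  Real
 * users such as the CMA allocator compute the range from a reserved
 * region whose pageblocks were set up as MIGRATE_CMA and pass
 * MIGRATE_CMA as the migratetype instead.
 */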
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
	unsigned cpu;

	mutex_lock(&pcp_batch_high_lock);
	for_each_possible_cpu(cpu)
		pageset_set_high_and_batch(zone,
				per_cpu_ptr(zone->pageset, cpu));
	mutex_unlock(&pcp_batch_high_lock);
}
#endif

void zone_pcp_reset(struct zone *zone)
{
	unsigned long flags;
	int cpu;
	struct per_cpu_pageset *pset;

	/* avoid races with drain_pages() */
	local_irq_save(flags);
	if (zone->pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pset = per_cpu_ptr(zone->pageset, cpu);
			drain_zonestat(zone, pset);
		}
		free_percpu(zone->pageset);
		zone->pageset = &boot_pageset;
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone and isolated
 * before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	unsigned int order, i;
	unsigned long pfn;
	unsigned long flags;

	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;

	offline_mem_sections(pfn, end_pfn);
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		/*
		 * A HWPoisoned page may not be in the buddy system,
		 * and its page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			SetPageReserved(page);
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		pr_info("remove from free list %lx %d %lx\n",
			pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page + i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
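
/*
 * Editor's note -- a worked example of the buddy-head computation above
 * (illustrative only, not part of the kernel source).  For pfn 0x1234e
 * and order 3, the head of the enclosing order-3 block is at
 * pfn & ~((1 << 3) - 1) = 0x12348, i.e. page_head = page - (pfn & 7)
 * = page - 6.  If that head page is PageBuddy() with page_order() >= 3,
 * the queried page lies inside a free buddy block of at least order 3
 * and the loop terminates with order < MAX_ORDER, so the function
 * returns true.
 */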