/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list; the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_cgroup.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);	/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
#ifdef CONFIG_MOVABLE_NODE
	[N_MEMORY] = { { [0] = 1UL } },
#endif
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
/*
 * When calculating the number of globally allowed dirty pages, there
 * is a certain number of per-zone reserves that should not be
 * considered dirtyable memory.  This is the sum of those reserves
 * over all existing zones that contribute dirtyable memory.
 */
unsigned long dirty_balance_reserve __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to
 * temporarily change gfp_allowed_mask in order to avoid using I/O during
 * memory allocations while devices are suspended.  To avoid races with the
 * suspend/hibernate code, they should always be called with pm_mutex held
 * (gfp_allowed_mask also should only be modified with pm_mutex held, unless
 * the suspend/hibernate code is guaranteed not to run in parallel with that
 * modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&pm_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~GFP_IOFS;
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & GFP_IOFS) == GFP_IOFS)
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */
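
/*
 * Illustrative usage of the helpers above (hypothetical caller, not part
 * of this file): the suspend path brackets device freezing with the
 * restrict/restore pair while holding pm_mutex, so no allocation can
 * start I/O against a suspended device in between:
 *
 *	mutex_lock(&pm_mutex);
 *	pm_restrict_gfp_mask();
 *	... suspend devices ...
 *	pm_restore_gfp_mask();
 *	mutex_unlock(&pm_mutex);
 */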
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in
 *	ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
#ifdef CONFIG_ZONE_DMA
	 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	 256,
#endif
#ifdef CONFIG_HIGHMEM
	 32,
#endif
	 32,
};

EXPORT_SYMBOL(totalram_pages);
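
/*
 * Worked example of the ratios above (illustrative only): on the 1G
 * machine from the comment, a HIGHMEM allocation must leave
 * 224M/32 = 7M of ZONE_NORMAL and (224M+784M)/256 = ~4M of ZONE_DMA
 * untouched, while a NORMAL allocation must leave 784M/256 = ~3M of
 * ZONE_DMA untouched.  Raising a ratio shrinks the corresponding
 * reserve.
 */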
static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;

static unsigned long __meminitdata nr_kernel_pages;
static unsigned long __meminitdata nr_all_pages;
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
static unsigned long __initdata required_kernelcore;
static unsigned long __initdata required_movablecore;
static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pageblock_flags_group(page, (unsigned long)migratetype,
					PB_migrate, PB_migrate_end);
}

bool oom_killer_disabled __read_mostly;

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
		unsigned long bad_flags)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/* Don't complain about poisoned pages */
	if (PageHWPoison(page)) {
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			printk(KERN_ALERT
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	printk(KERN_ALERT "BUG: Bad page state in process %s pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	dump_page_badflags(page, reason, bad_flags);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page".
 *
 * The remaining PAGE_SIZE pages are called "tail pages".
 *
 * All pages have PG_compound set.  All tail pages have their ->first_page
 * pointing at the head page.
 *
 * The first tail page's ->lru.next holds the address of the compound page's
 * put_page() function.  Its ->lru.prev holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

static void free_compound_page(struct page *page)
{
	__free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;

	set_compound_page_dtor(page, free_compound_page);
	set_compound_order(page, order);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->first_page = page;
		/* Make sure p->first_page is always valid for PageTail() */
		smp_wmb();
		__SetPageTail(p);
	}
}
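
/*
 * Minimal sketch of how the layout above is consumed (hypothetical
 * helper, not used by this file): given any constituent page of a
 * compound allocation, the head can be recovered via ->first_page and
 * the order via compound_order() on the head.
 */
static inline struct page *example_compound_head(struct page *p)
{
	/* tail pages point at the head; a head page is its own head */
	return PageTail(p) ? p->first_page : p;
}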
/* update __split_huge_page_refcount if you change this function */
static int destroy_compound_page(struct page *page, unsigned long order)
{
	int i;
	int nr_pages = 1 << order;
	int bad = 0;

	if (unlikely(compound_order(page) != order)) {
		bad_page(page, "wrong compound order", 0);
		bad++;
	}

	__ClearPageHead(page);

	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;

		if (unlikely(!PageTail(p))) {
			bad_page(page, "PageTail not set", 0);
			bad++;
		} else if (unlikely(p->first_page != page)) {
			bad_page(page, "first_page not consistent", 0);
			bad++;
		}
		__ClearPageTail(p);
	}

	return bad;
}

static inline void prep_zero_page(struct page *page, unsigned int order,
							gfp_t gfp_flags)
{
	int i;

	/*
	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
	 * and __GFP_HIGHMEM from hard or soft interrupt context.
	 */
	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
	for (i = 0; i < (1 << order); i++)
		clear_highpage(page + i);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
		printk(KERN_ERR "Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	printk(KERN_INFO "Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
__setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);

static inline void set_page_guard_flag(struct page *page)
{
	__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}

static inline void clear_page_guard_flag(struct page *page)
{
	__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
}
#else
static inline void set_page_guard_flag(struct page *page) { }
static inline void clear_page_guard_flag(struct page *page) { }
#endif

static inline void set_page_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
	__ClearPageBuddy(page);
	set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * to PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!pfn_valid_within(page_to_pfn(buddy)))
		return 0;

	if (page_is_guard(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		return 1;
	}

	if (PageBuddy(buddy) && page_order(buddy) == order) {
		VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

		/*
		 * zone check is done late to avoid uselessly
		 * calculating zone/node ids for pages that could
		 * never merge.
		 */
		if (page_zone_id(page) != page_zone_id(buddy))
			return 0;

		return 1;
	}
	return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * runs of free pages of length (1 << order), marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE.  The page's order is recorded in the
 * page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype)
{
	unsigned long page_idx;
	unsigned long combined_idx;
	unsigned long uninitialized_var(buddy_idx);
	struct page *buddy;
	int max_order = MAX_ORDER;

	VM_BUG_ON(!zone_is_initialized(zone));

	if (unlikely(PageCompound(page)))
		if (unlikely(destroy_compound_page(page, order)))
			return;

	VM_BUG_ON(migratetype == -1);
	if (is_migrate_isolate(migratetype)) {
		/*
		 * We restrict max order of merging to prevent merge
		 * between freepages on isolate pageblock and normal
		 * pageblock. Without this, pageblock isolation
		 * could cause incorrect freepage accounting.
		 */
		max_order = min(MAX_ORDER, pageblock_order + 1);
	} else {
		__mod_zone_freepage_state(zone, 1 << order, migratetype);
	}

	page_idx = pfn & ((1 << max_order) - 1);

	VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

	while (order < max_order - 1) {
		buddy_idx = __find_buddy_index(page_idx, order);
		buddy = page + (buddy_idx - page_idx);
		if (!page_is_buddy(page, buddy, order))
			break;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy)) {
			clear_page_guard_flag(buddy);
			set_page_private(buddy, 0);
			if (!is_migrate_isolate(migratetype)) {
				__mod_zone_freepage_state(zone, 1 << order,
							  migratetype);
			}
		} else {
			list_del(&buddy->lru);
			zone->free_area[order].nr_free--;
			rmv_page_order(buddy);
		}
		combined_idx = buddy_idx & page_idx;
		page = page + (combined_idx - page_idx);
		page_idx = combined_idx;
		order++;
	}
	set_page_order(page, order);

	/*
	 * If this is not the largest possible page, check if the buddy
	 * of the next-highest order is free. If it is, it's possible
	 * that pages are being freed that will coalesce soon. In case
	 * that is happening, add the free page to the tail of the list
	 * so it's less likely to be used soon and more likely to be merged
	 * as a higher order page.
	 */
	if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
		struct page *higher_page, *higher_buddy;
		combined_idx = buddy_idx & page_idx;
		higher_page = page + (combined_idx - page_idx);
		buddy_idx = __find_buddy_index(combined_idx, order + 1);
		higher_buddy = higher_page + (buddy_idx - combined_idx);
		if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
			list_add_tail(&page->lru,
				&zone->free_area[order].free_list[migratetype]);
			goto out;
		}
	}

	list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
out:
	zone->free_area[order].nr_free++;
}
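
/*
 * Illustrative sketch of the index arithmetic above (hypothetical
 * helper, not used by this file): __find_buddy_index(page_idx, order)
 * flips bit 'order' of the index, and a merged pair starts at the
 * lower of the two indices.
 */
static inline unsigned long example_buddy_index(unsigned long page_idx,
						unsigned int order)
{
	/*
	 * e.g. at order 0, index 8 pairs with 8 ^ 1 == 9; once merged,
	 * combined_idx = 9 & 8 == 8, and the next round probes the
	 * order-1 buddy 8 ^ 2 == 10.
	 */
	return page_idx ^ (1UL << order);
}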
static inline int free_pages_check(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
		bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
	}
	if (unlikely(mem_cgroup_bad_page_check(page)))
		bad_reason = "cgroup check failed";
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	page_cpupid_reset_last(page);
	if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
		page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	return 0;
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int to_free = count;
	unsigned long nr_scanned;

	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	while (to_free) {
		struct page *page;
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around
		 * empty lists.
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = to_free;

		do {
			int mt;	/* migratetype of the to-be-freed page */

			page = list_entry(list->prev, struct page, lru);
			/* must delete as __free_one_page list manipulates */
			list_del(&page->lru);
			mt = get_freepage_migratetype(page);
			if (unlikely(has_isolate_pageblock(zone)))
				mt = get_pageblock_migratetype(page);

			/* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
			__free_one_page(page, page_to_pfn(page), zone, 0, mt);
			trace_mm_page_pcpu_drain(page, 0, mt);
		} while (--to_free && --batch_free && !list_empty(list));
	}
	spin_unlock(&zone->lock);
}
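
/*
 * Illustrative reading of the round-robin batching above: each pass of
 * the inner do-while steps to the next PCP list, and batch_free counts
 * the steps taken to reach a non-empty one, so a list found after
 * skipping empty neighbours donates correspondingly more pages per
 * visit.  When batch_free reaches MIGRATE_PCPTYPES, every other list
 * was empty and the surviving list donates everything that remains to
 * be freed.
 */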
static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype)
{
	unsigned long nr_scanned;

	spin_lock(&zone->lock);
	nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
	if (nr_scanned)
		__mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);

	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype);
	spin_unlock(&zone->lock);
}

static bool free_pages_prepare(struct page *page, unsigned int order)
{
	int i;
	int bad = 0;

	trace_mm_page_free(page, order);
	kmemcheck_free_shadow(page, order);

	if (PageAnon(page))
		page->mapping = NULL;
	for (i = 0; i < (1 << order); i++)
		bad += free_pages_check(page + i);
	if (bad)
		return false;

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	arch_free_page(page, order);
	kernel_map_pages(page, 1 << order, 0);

	return true;
}

static void __free_pages_ok(struct page *page, unsigned int order)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	set_freepage_migratetype(page, migratetype);
	free_one_page(page_zone(page), page, pfn, order, migratetype);
	local_irq_restore(flags);
}

void __init __free_pages_bootmem(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	page_zone(page)->managed_pages += nr_pages;
	set_page_refcounted(page);
	__free_pages(page, order);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;
		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, struct free_area *area,
	int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		area--;
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

#ifdef CONFIG_DEBUG_PAGEALLOC
		if (high < debug_guardpage_minorder()) {
			/*
			 * Mark as guard pages (or a single guard page); this
			 * allows them to be merged back into the allocator
			 * when the buddy is freed.  The corresponding page
			 * table entries are not touched; the pages stay
			 * not-present in the virtual address space.
			 */
			INIT_LIST_HEAD(&page[size].lru);
			set_page_guard_flag(&page[size]);
			set_page_private(&page[size], high);
			/* Guard pages are not available for any usage */
			__mod_zone_freepage_state(zone, -(1 << high),
						  migratetype);
			continue;
		}
#endif
		list_add(&page[size].lru, &area->free_list[migratetype]);
		area->nr_free++;
		set_page_order(&page[size], high);
	}
}
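
/*
 * Worked example of the subdivision above (illustrative): satisfying a
 * low == 0 request from a high == 3 block of 8 pages splits off the
 * upper half at each step on the way down.  Pages 4-7 land on the
 * order-2 free list, pages 2-3 on order-1, page 1 on order-0, and
 * page 0 is handed to the caller.
 */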
/*
 * This page is about to be returned from the page allocator.
 */
static inline int check_new_page(struct page *page)
{
	const char *bad_reason = NULL;
	unsigned long bad_flags = 0;

	if (unlikely(page_mapcount(page)))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(atomic_read(&page->_count) != 0))
		bad_reason = "nonzero _count";
	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
		bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
		bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
	}
	if (unlikely(mem_cgroup_bad_page_check(page)))
		bad_reason = "cgroup check failed";
	if (unlikely(bad_reason)) {
		bad_page(page, bad_reason, bad_flags);
		return 1;
	}
	return 0;
}

static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags)
{
	int i;

	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;
		if (unlikely(check_new_page(p)))
			return 1;
	}

	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	kernel_map_pages(page, 1 << order, 1);

	if (gfp_flags & __GFP_ZERO)
		prep_zero_page(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	return 0;
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists.
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		if (list_empty(&area->free_list[migratetype]))
			continue;

		page = list_entry(area->free_list[migratetype].next,
							struct page, lru);
		list_del(&page->lru);
		rmv_page_order(page);
		area->nr_free--;
		expand(zone, page, order, current_order, area, migratetype);
		set_freepage_migratetype(page, migratetype);
		return page;
	}

	return NULL;
}

/*
 * This array describes the order in which free lists are fallen back on
 * when the free lists for the desired migratetype are depleted.
 */
static int fallbacks[MIGRATE_TYPES][4] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
#ifdef CONFIG_CMA
	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
#else
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
#endif
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
#endif
};
/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not necessarily aligned on a
 * pageblock boundary. If alignment is required, use move_freepages_block().
 */
int move_freepages(struct zone *zone,
		   struct page *start_page, struct page *end_page,
		   int migratetype)
{
	struct page *page;
	unsigned long order;
	int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
	/*
	 * page_zone is not safe to call in this context when
	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
	 * grouping pages by mobility.
	 */
	VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
#endif

	for (page = start_page; page <= end_page;) {
		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			page++;
			continue;
		}

		order = page_order(page);
		list_move(&page->lru,
			  &zone->free_area[order].free_list[migratetype]);
		set_freepage_migratetype(page, migratetype);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}
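
/*
 * Round the given page down to its pageblock and move the whole block to
 * the free lists of the requested migratetype. Blocks that straddle a
 * zone boundary are handled: if the block's start lies outside the zone,
 * moving begins at the page itself; if its end lies outside the zone,
 * the block is skipped entirely.
 */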
int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_page = page;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype);
}
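
/*
 * Retype every pageblock covered by a free block of start_order. The
 * caller passes start_order >= pageblock_order, so the block spans
 * 1 << (start_order - pageblock_order) whole pageblocks.
 */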
static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}
/*
 * If breaking a large block of pages, move all free pages to the preferred
 * allocation list. If falling back for a reclaimable kernel allocation, be
 * more aggressive about taking ownership of free pages.
 *
 * On the other hand, never change migration type of MIGRATE_CMA pageblocks
 * nor move CMA pages to different free lists. We don't want unmovable pages
 * to be allocated from MIGRATE_CMA areas.
 *
 * Returns the new migratetype of the pageblock (or the same old migratetype
 * if it was unchanged).
 */
static int try_to_steal_freepages(struct zone *zone, struct page *page,
				  int start_type, int fallback_type)
{
	int current_order = page_order(page);

	/*
	 * When borrowing from MIGRATE_CMA, we need to release the excess
	 * buddy pages to CMA itself. We also ensure the freepage_migratetype
	 * is set to CMA so it is returned to the correct freelist in case
	 * the page ends up being not actually allocated from the pcp lists.
	 */
	if (is_migrate_cma(fallback_type))
		return fallback_type;

	/* Take ownership for orders >= pageblock_order */
	if (current_order >= pageblock_order) {
		change_pageblock_range(page, current_order, start_type);
		return start_type;
	}

	if (current_order >= pageblock_order / 2 ||
	    start_type == MIGRATE_RECLAIMABLE ||
	    page_group_by_mobility_disabled) {
		int pages;

		pages = move_freepages_block(zone, page, start_type);

		/* Claim the whole block if over half of it is free */
		if (pages >= (1 << (pageblock_order-1)) ||
				page_group_by_mobility_disabled) {
			set_pageblock_migratetype(page, start_type);
			return start_type;
		}
	}

	return fallback_type;
}
/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
{
	struct free_area *area;
	unsigned int current_order;
	struct page *page;
	int migratetype, new_type, i;

	/* Find the largest possible block of pages in the other list */
	for (current_order = MAX_ORDER-1;
				current_order >= order && current_order <= MAX_ORDER-1;
				--current_order) {
		for (i = 0;; i++) {
			migratetype = fallbacks[start_migratetype][i];

			/* MIGRATE_RESERVE handled later if necessary */
			if (migratetype == MIGRATE_RESERVE)
				break;

			area = &(zone->free_area[current_order]);
			if (list_empty(&area->free_list[migratetype]))
				continue;

			page = list_entry(area->free_list[migratetype].next,
					struct page, lru);
			area->nr_free--;

			new_type = try_to_steal_freepages(zone, page,
							  start_migratetype,
							  migratetype);

			/* Remove the page from the freelists */
			list_del(&page->lru);
			rmv_page_order(page);

			expand(zone, page, order, current_order, area,
			       new_type);
			/*
			 * The freepage_migratetype may differ from pageblock's
			 * migratetype depending on the decisions in
			 * try_to_steal_freepages. This is OK as long as it does
			 * not differ for MIGRATE_CMA type.
			 */
			set_freepage_migratetype(page, new_type);

			trace_mm_page_alloc_extfrag(page, order, current_order,
				start_migratetype, migratetype, new_type);

			return page;
		}
	}

	return NULL;
}
/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

retry_reserve:
	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		page = __rmqueue_fallback(zone, order, migratetype);

		/*
		 * Use MIGRATE_RESERVE rather than fail an allocation. goto
		 * is used because __rmqueue_smallest is an inline function
		 * and we want just one call site
		 */
		if (!page) {
			migratetype = MIGRATE_RESERVE;
			goto retry_reserve;
		}
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}
/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency. Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, bool cold)
{
	int i;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype);
		if (unlikely(page == NULL))
			break;

		/*
		 * Split buddy pages returned by expand() are received here
		 * in physical page order. The page is added to the caller's
		 * list and the list head then moves forward. From the
		 * caller's perspective, the linked list is ordered by page
		 * number under some conditions. This is useful for IO
		 * devices that can merge IO requests if the physical pages
		 * are ordered properly.
		 */
		if (likely(!cold))
			list_add(&page->lru, list);
		else
			list_add_tail(&page->lru, list);
		list = &page->lru;
		if (is_migrate_cma(get_freepage_migratetype(page)))
			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
					      -(1 << order));
	}
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return i;
}
#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain, batch;

	local_irq_save(flags);
	batch = ACCESS_ONCE(pcp->batch);
	to_drain = min(pcp->count, batch);
	if (to_drain > 0) {
		free_pcppages_bulk(zone, to_drain, pcp);
		pcp->count -= to_drain;
	}
	local_irq_restore(flags);
}
#endif
/*
 * Drain pages of the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	unsigned long flags;
	struct zone *zone;

	for_each_populated_zone(zone) {
		struct per_cpu_pageset *pset;
		struct per_cpu_pages *pcp;

		local_irq_save(flags);
		pset = per_cpu_ptr(zone->pageset, cpu);

		pcp = &pset->pcp;
		if (pcp->count) {
			free_pcppages_bulk(zone, pcp->count, pcp);
			pcp->count = 0;
		}
		local_irq_restore(flags);
	}
}
/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 */
void drain_local_pages(void *arg)
{
	drain_pages(smp_processor_id());
}
/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 *
 * Note that this code is protected against sending an IPI to an offline
 * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
 * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
 * nothing keeps CPUs from showing up after we populated the cpumask and
 * before the call to on_each_cpu_mask().
 */
void drain_all_pages(void)
{
	int cpu;
	struct per_cpu_pageset *pcp;
	struct zone *zone;

	/*
	 * Allocate in the BSS so we won't require allocation in
	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
	 */
	static cpumask_t cpus_with_pcps;

	/*
	 * We don't care about racing with CPU hotplug events
	 * as offline notification will cause the notified
	 * cpu to drain that CPU's pcps, and on_each_cpu_mask
	 * disables preemption as part of its processing.
	 */
	for_each_online_cpu(cpu) {
		bool has_pcps = false;
		for_each_populated_zone(zone) {
			pcp = per_cpu_ptr(zone->pageset, cpu);
			if (pcp->pcp.count) {
				has_pcps = true;
				break;
			}
		}
		if (has_pcps)
			cpumask_set_cpu(cpu, &cpus_with_pcps);
		else
			cpumask_clear_cpu(cpu, &cpus_with_pcps);
	}
	on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1);
}

#ifdef CONFIG_HIBERNATION
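
/*
 * Mark every free page in the zone for the hibernation snapshot code:
 * first clear the swsusp free bit on all valid, non-forbidden pfns, then
 * walk the free lists and set it on each page of every free block.
 */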
void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn;
	unsigned long flags;
	unsigned int order, t;
	struct list_head *curr;

	if (zone_is_empty(zone))
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone_end_pfn(zone);
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each(curr, &zone->free_area[order].free_list[t]) {
			unsigned long i;

			pfn = page_to_pfn(list_entry(curr, struct page, lru));
			for (i = 0; i < (1UL << order); i++)
				swsusp_set_page_free(pfn_to_page(pfn + i));
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */
/*
 * Free a 0-order page
 * cold == true ? free a cold page : free a hot page
 */
void free_hot_cold_page(struct page *page, bool cold)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	unsigned long flags;
	unsigned long pfn = page_to_pfn(page);
	int migratetype;

	if (!free_pages_prepare(page, 0))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	set_freepage_migratetype(page, migratetype);
	local_irq_save(flags);
	__count_vm_event(PGFREE);

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat RESERVE as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(is_migrate_isolate(migratetype))) {
			free_one_page(zone, page, pfn, 0, migratetype);
			goto out;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	if (!cold)
		list_add(&page->lru, &pcp->lists[migratetype]);
	else
		list_add_tail(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		unsigned long batch = ACCESS_ONCE(pcp->batch);
		free_pcppages_bulk(zone, batch, pcp);
		pcp->count -= batch;
	}

out:
	local_irq_restore(flags);
}
/*
 * Free a list of 0-order pages
 */
void free_hot_cold_page_list(struct list_head *list, bool cold)
{
	struct page *page, *next;

	list_for_each_entry_safe(page, next, list, lru) {
		trace_mm_page_free_batched(page, cold);
		free_hot_cold_page(page, cold);
	}
}
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n-1].
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	VM_BUG_ON_PAGE(!page_count(page), page);

#ifdef CONFIG_KMEMCHECK
	/*
	 * Split shadow pages too, because free(page[0]) would
	 * otherwise free the whole shadow.
	 */
	if (kmemcheck_page_is_tracked(page))
		split_page(virt_to_page(page[0].shadow), order);
#endif

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
}
EXPORT_SYMBOL_GPL(split_page);
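
/*
 * Pull a single free block of the given order straight off the buddy free
 * lists for the caller's exclusive use. Unless the block is already
 * isolated, the low watermark (plus the block size) must still hold
 * afterwards. Returns the number of base pages isolated, or 0 on failure.
 */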
int __isolate_free_page(struct page *page, unsigned int order)
{
	unsigned long watermark;
	struct zone *zone;
	int mt;

	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	mt = get_pageblock_migratetype(page);

	if (!is_migrate_isolate(mt)) {
		/* Obey watermarks as if the page was being allocated */
		watermark = low_wmark_pages(zone) + (1 << order);
		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
			return 0;

		__mod_zone_freepage_state(zone, -(1UL << order), mt);
	}

	/* Remove page from free list */
	list_del(&page->lru);
	zone->free_area[order].nr_free--;
	rmv_page_order(page);

	/* Set the pageblock if the isolated page is at least a pageblock */
	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages) {
			int mt = get_pageblock_migratetype(page);
			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
				set_pageblock_migratetype(page,
							  MIGRATE_MOVABLE);
		}
	}

	return 1UL << order;
}
/*
 * Similar to split_page except the page is already free. As this is only
 * being used for migration, the migratetype of the block also changes.
 * As this is called with interrupts disabled, the caller is responsible
 * for calling arch_alloc_page() and kernel_map_pages() after interrupts
 * are enabled.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
int split_free_page(struct page *page)
{
	unsigned int order;
	int nr_pages;

	order = page_order(page);

	nr_pages = __isolate_free_page(page, order);
	if (!nr_pages)
		return 0;

	/* Split into individual pages */
	set_page_refcounted(page);
	split_page(page, order);
	return nr_pages;
}
/*
 * Really, prep_compound_page() should be called from rmqueue_bulk().  But
 * we cheat by calling it from here, in the order > 0 path.  Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
			gfp_t gfp_flags, int migratetype)
{
	unsigned long flags;
	struct page *page;
	bool cold = ((gfp_flags & __GFP_COLD) != 0);

again:
	if (likely(order == 0)) {
		struct per_cpu_pages *pcp;
		struct list_head *list;

		local_irq_save(flags);
		pcp = &this_cpu_ptr(zone->pageset)->pcp;
		list = &pcp->lists[migratetype];
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype, cold);
			if (unlikely(list_empty(list)))
				goto failed;
		}

		if (cold)
			page = list_entry(list->prev, struct page, lru);
		else
			page = list_entry(list->next, struct page, lru);

		list_del(&page->lru);
		pcp->count--;
	} else {
		if (unlikely(gfp_flags & __GFP_NOFAIL)) {
			/*
			 * __GFP_NOFAIL is not to be used in new code.
			 *
			 * All __GFP_NOFAIL callers should be fixed so that they
			 * properly detect and handle allocation failures.
			 *
			 * We most definitely don't want callers attempting to
			 * allocate greater than order-1 page units with
			 * __GFP_NOFAIL.
			 */
			WARN_ON_ONCE(order > 1);
		}
		spin_lock_irqsave(&zone->lock, flags);
		page = __rmqueue(zone, order, migratetype);
		spin_unlock(&zone->lock);
		if (!page)
			goto failed;
		__mod_zone_freepage_state(zone, -(1 << order),
					  get_freepage_migratetype(page));
	}

	__mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
	if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
	    !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
		set_bit(ZONE_FAIR_DEPLETED, &zone->flags);

	__count_zone_vm_events(PGALLOC, zone, 1 << order);
	zone_statistics(preferred_zone, zone, gfp_flags);
	local_irq_restore(flags);

	VM_BUG_ON_PAGE(bad_range(zone, page), page);
	if (prep_new_page(page, order, gfp_flags))
		goto again;
	return page;

failed:
	local_irq_restore(flags);
	return NULL;
}

#ifdef CONFIG_FAIL_PAGE_ALLOC
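
/*
 * Page allocation failure injection. With CONFIG_FAULT_INJECTION_DEBUG_FS
 * the knobs below appear under debugfs alongside the generic fault_attr
 * attributes (probability, interval, times, ...) created by
 * fault_create_debugfs_attr(). Illustrative usage, assuming debugfs is
 * mounted at /sys/kernel/debug:
 *
 *	echo 10 > /sys/kernel/debug/fail_page_alloc/probability
 *	echo 0  > /sys/kernel/debug/fail_page_alloc/min-order
 */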
static struct {
	struct fault_attr attr;

	u32 ignore_gfp_highmem;
	u32 ignore_gfp_wait;
	u32 min_order;
} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_wait = 1,
	.ignore_gfp_highmem = 1,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);

static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return false;
	if (gfp_mask & __GFP_NOFAIL)
		return false;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return false;
	if (fail_page_alloc.ignore_gfp_wait && (gfp_mask & __GFP_WAIT))
		return false;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
					&fail_page_alloc.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
				&fail_page_alloc.ignore_gfp_wait))
		goto fail;
	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				&fail_page_alloc.ignore_gfp_highmem))
		goto fail;
	if (!debugfs_create_u32("min-order", mode, dir,
				&fail_page_alloc.min_order))
		goto fail;

	return 0;
fail:
	debugfs_remove_recursive(dir);

	return -ENOMEM;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return false;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */
/*
 * Return true if free pages are above 'mark'. This takes into account the
 * order of the allocation.
 */
static bool __zone_watermark_ok(struct zone *z, unsigned int order,
			unsigned long mark, int classzone_idx, int alloc_flags,
			long free_pages)
{
	/* free_pages may go negative - that's OK */
	long min = mark;
	int o;
	long free_cma = 0;

	free_pages -= (1 << order) - 1;
	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;
	if (alloc_flags & ALLOC_HARDER)
		min -= min / 4;
#ifdef CONFIG_CMA
	/* If allocation can't use CMA areas don't use free CMA pages */
	if (!(alloc_flags & ALLOC_CMA))
		free_cma = zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

	if (free_pages - free_cma <= min + z->lowmem_reserve[classzone_idx])
		return false;
	for (o = 0; o < order; o++) {
		/* At the next order, this order's pages become unavailable */
		free_pages -= z->free_area[o].nr_free << o;

		/* Require fewer higher order pages to be free */
		min >>= 1;

		if (free_pages <= min)
			return false;
	}
	return true;
}
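
/*
 * Illustrative example (numbers invented for exposition): for an order-2
 * request with mark = 128 and neither ALLOC_HIGH nor ALLOC_HARDER, the
 * zone must first hold more than 128 + lowmem_reserve free pages overall
 * (after discounting (1 << 2) - 1 = 3 pages), and then, with the order-0
 * and order-1 free pages excluded in turn, still exceed 64 and 32 pages
 * respectively, so enough of the remaining memory sits in order >= 2
 * blocks.
 */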
bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
		      int classzone_idx, int alloc_flags)
{
	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
					zone_page_state(z, NR_FREE_PAGES));
}

bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
			unsigned long mark, int classzone_idx, int alloc_flags)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
								free_pages);
}
#ifdef CONFIG_NUMA
/*
 * zlc_setup - Setup for "zonelist cache".  Uses cached zone data to
 * skip over zones that are not allowed by the cpuset, or that have
 * been recently (in last second) found to be nearly full.  See further
 * comments in mmzone.h.  Reduces cache footprint of zonelist scans
 * that have to skip over a lot of full or unallowed zones.
 *
 * If the zonelist cache is present in the passed zonelist, then
 * returns a pointer to the allowed node mask (either the current
 * task's mems_allowed, or node_states[N_MEMORY].)
 *
 * If the zonelist cache is not available for this zonelist, does
 * nothing and returns NULL.
 *
 * If the fullzones BITMAP in the zonelist cache is stale (more than
 * a second since last zap'd) then we zap it out (clear its bits.)
 *
 * We hold off even calling zlc_setup, until after we've checked the
 * first zone in the zonelist, on the theory that most allocations will
 * be satisfied from that first zone, so best to examine that zone as
 * quickly as we can.
 */
static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	nodemask_t *allowednodes;	/* zonelist_cache approximation */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return NULL;

	if (time_after(jiffies, zlc->last_full_zap + HZ)) {
		bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
		zlc->last_full_zap = jiffies;
	}

	allowednodes = !in_interrupt() && (alloc_flags & ALLOC_CPUSET) ?
					&cpuset_current_mems_allowed :
					&node_states[N_MEMORY];
	return allowednodes;
}
/*
 * Given 'z' scanning a zonelist, run a couple of quick checks to see
 * if it is worth looking at further for free memory:
 * 1) Check that the zone isn't thought to be full (doesn't have its
 *    bit set in the zonelist_cache fullzones BITMAP).
 * 2) Check that the zone's node (obtained from the zonelist_cache
 *    z_to_n[] mapping) is allowed in the passed in allowednodes mask.
 * Return true (non-zero) if zone is worth looking at further, or
 * else return false (zero) if it is not.
 *
 * This check -ignores- the distinction between various watermarks,
 * such as GFP_HIGH, GFP_ATOMIC, PF_MEMALLOC, ...  If a zone is
 * found to be full for any variation of these watermarks, it will
 * be considered full for up to one second by all requests, unless
 * we are so low on memory on all allowed nodes that we are forced
 * into the second scan of the zonelist.
 *
 * In the second scan we ignore this zonelist cache and exactly
 * apply the watermarks to all zones, even if it is slower to do so.
 * We are low on memory in the second scan, and should leave no stone
 * unturned looking for a free page.
 */
static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
						nodemask_t *allowednodes)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */
	int n;				/* node that zone *z is on */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return 1;

	i = z - zonelist->_zonerefs;
	n = zlc->z_to_n[i];

	/* This zone is worth trying if it is allowed but not full */
	return node_isset(n, *allowednodes) && !test_bit(i, zlc->fullzones);
}
/*
 * Given 'z' scanning a zonelist, set the corresponding bit in
 * zlc->fullzones, so that subsequent attempts to allocate a page
 * from that zone don't waste time re-examining it.
 */
static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */
	int i;				/* index of *z in zonelist zones */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	i = z - zonelist->_zonerefs;

	set_bit(i, zlc->fullzones);
}
/*
 * Clear all zones full, called after direct reclaim makes progress so that
 * a zone that was recently full is not skipped over for up to a second.
 */
static void zlc_clear_zones_full(struct zonelist *zonelist)
{
	struct zonelist_cache *zlc;	/* cached zonelist speedup info */

	zlc = zonelist->zlcache_ptr;
	if (!zlc)
		return;

	bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
}
static bool zone_local(struct zone *local_zone, struct zone *zone)
{
	return local_zone->node == zone->node;
}

static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
				RECLAIM_DISTANCE;
}
#else	/* CONFIG_NUMA */

static nodemask_t *zlc_setup(struct zonelist *zonelist, int alloc_flags)
{
	return NULL;
}

static int zlc_zone_worth_trying(struct zonelist *zonelist, struct zoneref *z,
				nodemask_t *allowednodes)
{
	return 1;
}

static void zlc_mark_zone_full(struct zonelist *zonelist, struct zoneref *z)
{
}

static void zlc_clear_zones_full(struct zonelist *zonelist)
{
}

static bool zone_local(struct zone *local_zone, struct zone *zone)
{
	return true;
}

static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return true;
}

#endif	/* CONFIG_NUMA */
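
/*
 * Refill the NR_ALLOC_BATCH counter of every zone up to and including the
 * preferred zone on its node back to (high watermark - low watermark),
 * and clear the fair-zone depletion flag, so a fresh ALLOC_FAIR pass can
 * round-robin across those zones again.
 */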
static void reset_alloc_batches(struct zone *preferred_zone)
{
	struct zone *zone = preferred_zone->zone_pgdat->node_zones;

	do {
		mod_zone_page_state(zone, NR_ALLOC_BATCH,
			high_wmark_pages(zone) - low_wmark_pages(zone) -
			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
		clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
	} while (zone++ != preferred_zone);
}
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
		struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
		struct zone *preferred_zone, int classzone_idx, int migratetype)
{
	struct zoneref *z;
	struct page *page = NULL;
	struct zone *zone;
	nodemask_t *allowednodes = NULL;/* zonelist_cache approximation */
	int zlc_active = 0;		/* set if using zonelist_cache */
	int did_zlc_setup = 0;		/* just call zlc_setup() one time */
	bool consider_zone_dirty = (alloc_flags & ALLOC_WMARK_LOW) &&
				(gfp_mask & __GFP_WRITE);
	int nr_fair_skipped = 0;
	bool zonelist_rescan;

zonelist_scan:
	zonelist_rescan = false;

	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
	 */
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						high_zoneidx, nodemask) {
		unsigned long mark;

		if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
			!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;
		if (cpusets_enabled() &&
			(alloc_flags & ALLOC_CPUSET) &&
			!cpuset_zone_allowed_softwall(zone, gfp_mask))
				continue;
		/*
		 * Distribute pages in proportion to the individual
		 * zone size to ensure fair page aging.  The zone a
		 * page was allocated in should have no effect on the
		 * time the page has in memory before being reclaimed.
		 */
		if (alloc_flags & ALLOC_FAIR) {
			if (!zone_local(preferred_zone, zone))
				break;
			if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
				nr_fair_skipped++;
				continue;
			}
		}
		/*
		 * When allocating a page cache page for writing, we
		 * want to get it from a zone that is within its dirty
		 * limit, such that no single zone holds more than its
		 * proportional share of globally allowed dirty pages.
		 * The dirty limits take into account the zone's
		 * lowmem reserves and high watermark so that kswapd
		 * should be able to balance it without having to
		 * write pages from its LRU list.
		 *
		 * This may look like it could increase pressure on
		 * lower zones by failing allocations in higher zones
		 * before they are full.  But the pages that do spill
		 * over are limited as the lower zones are protected
		 * by this very same mechanism.  It should not become
		 * a practical burden to them.
		 *
		 * XXX: For now, allow allocations to potentially
		 * exceed the per-zone dirty limit in the slowpath
		 * (ALLOC_WMARK_LOW unset) before going into reclaim,
		 * which is important when on a NUMA setup the allowed
		 * zones are together not big enough to reach the
		 * global limit.  The proper fix for these situations
		 * will require awareness of zones in the
		 * dirty-throttling and the flusher threads.
		 */
		if (consider_zone_dirty && !zone_dirty_ok(zone))
			continue;

		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
		if (!zone_watermark_ok(zone, order, mark,
				       classzone_idx, alloc_flags)) {
			int ret;

			/* Checked here to keep the fast path fast */
			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
			if (alloc_flags & ALLOC_NO_WATERMARKS)
				goto try_this_zone;

			if (IS_ENABLED(CONFIG_NUMA) &&
					!did_zlc_setup && nr_online_nodes > 1) {
				/*
				 * we do zlc_setup if there are multiple nodes
				 * and before considering the first zone allowed
				 * by the cpuset.
				 */
				allowednodes = zlc_setup(zonelist, alloc_flags);
				zlc_active = 1;
				did_zlc_setup = 1;
			}

			if (zone_reclaim_mode == 0 ||
			    !zone_allows_reclaim(preferred_zone, zone))
				goto this_zone_full;

			/*
			 * As we may have just activated ZLC, check if the first
			 * eligible zone has failed zone_reclaim recently.
			 */
			if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
				!zlc_zone_worth_trying(zonelist, z, allowednodes))
				continue;

			ret = zone_reclaim(zone, gfp_mask, order);
			switch (ret) {
			case ZONE_RECLAIM_NOSCAN:
				/* did not scan */
				continue;
			case ZONE_RECLAIM_FULL:
				/* scanned but unreclaimable */
				continue;
			default:
				/* did we reclaim enough */
				if (zone_watermark_ok(zone, order, mark,
						classzone_idx, alloc_flags))
					goto try_this_zone;

				/*
				 * Failed to reclaim enough to meet watermark.
				 * Only mark the zone full if checking the min
				 * watermark or if we failed to reclaim just
				 * 1<<order pages or else the page allocator
				 * fastpath will prematurely mark zones full
				 * when the watermark is between the low and
				 * min watermarks.
				 */
				if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
				    ret == ZONE_RECLAIM_SOME)
					goto this_zone_full;

				continue;
			}
		}

try_this_zone:
		page = buffered_rmqueue(preferred_zone, zone, order,
						gfp_mask, migratetype);
		if (page)
			break;
this_zone_full:
		if (IS_ENABLED(CONFIG_NUMA) && zlc_active)
			zlc_mark_zone_full(zonelist, z);
	}

	if (page) {
		/*
		 * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
		 * necessary to allocate the page. The expectation is
		 * that the caller is taking steps that will free more
		 * memory. The caller should avoid the page being used
		 * for !PFMEMALLOC purposes.
		 */
		page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);
		return page;
	}

	/*
	 * The first pass makes sure allocations are spread fairly within the
	 * local node.  However, the local node might have free pages left
	 * after the fairness batches are exhausted, and remote zones haven't
	 * even been considered yet.  Try once more without fairness, and
	 * include remote zones now, before entering the slowpath and waking
	 * kswapd: prefer spilling to a remote zone over swapping locally.
	 */
	if (alloc_flags & ALLOC_FAIR) {
		alloc_flags &= ~ALLOC_FAIR;
		if (nr_fair_skipped) {
			zonelist_rescan = true;
			reset_alloc_batches(preferred_zone);
		}
		if (nr_online_nodes > 1)
			zonelist_rescan = true;
	}

	if (unlikely(IS_ENABLED(CONFIG_NUMA) && zlc_active)) {
		/* Disable zlc cache for second zonelist scan */
		zlc_active = 0;
		zonelist_rescan = true;
	}

	if (zonelist_rescan)
		goto zonelist_scan;

	return NULL;
}
/*
 * Large machines with many possible nodes should not always dump per-node
 * meminfo in irq context.
 */
static inline bool should_suppress_show_mem(void)
{
	bool ret = false;

#if NODES_SHIFT > 8
	ret = in_interrupt();
#endif
	return ret;
}
static DEFINE_RATELIMIT_STATE(nopage_rs,
		DEFAULT_RATELIMIT_INTERVAL,
		DEFAULT_RATELIMIT_BURST);

void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...)
{
	unsigned int filter = SHOW_MEM_FILTER_NODES;

	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
	    debug_guardpage_minorder() > 0)
		return;

	/*
	 * This documents exceptions given to allocations in certain
	 * contexts that are allowed to allocate outside current's set
	 * of allowed nodes.
	 */
	if (!(gfp_mask & __GFP_NOMEMALLOC))
		if (test_thread_flag(TIF_MEMDIE) ||
		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
			filter &= ~SHOW_MEM_FILTER_NODES;
	if (in_interrupt() || !(gfp_mask & __GFP_WAIT))
		filter &= ~SHOW_MEM_FILTER_NODES;

	if (fmt) {
		struct va_format vaf;
		va_list args;

		va_start(args, fmt);

		vaf.fmt = fmt;
		vaf.va = &args;

		pr_warn("%pV", &vaf);

		va_end(args);
	}

	pr_warn("%s: page allocation failure: order:%d, mode:0x%x\n",
		current->comm, order, gfp_mask);

	dump_stack();
	if (!should_suppress_show_mem())
		show_mem(filter);
}
static inline int
should_alloc_retry(gfp_t gfp_mask, unsigned int order,
				unsigned long did_some_progress,
				unsigned long pages_reclaimed)
{
	/* Do not loop if specifically requested */
	if (gfp_mask & __GFP_NORETRY)
		return 0;

	/* Always retry if specifically requested */
	if (gfp_mask & __GFP_NOFAIL)
		return 1;

	/*
	 * Suspend converts GFP_KERNEL to __GFP_WAIT which can prevent reclaim
	 * making forward progress without invoking OOM. Suspend also disables
	 * storage devices so kswapd will not help. Bail if we are suspending.
	 */
	if (!did_some_progress && pm_suspended_storage())
		return 0;

	/*
	 * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER
	 * means __GFP_NOFAIL, but that may not be true in other
	 * implementations.
	 */
	if (order <= PAGE_ALLOC_COSTLY_ORDER)
		return 1;

	/*
	 * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is
	 * specified, then we retry until we no longer reclaim any pages
	 * (above), or we've reclaimed an order of pages at least as
	 * large as the allocation's order. In both cases, if the
	 * allocation still fails, we stop retrying.
	 */
	if (gfp_mask & __GFP_REPEAT && pages_reclaimed < (1 << order))
		return 1;

	return 0;
}
static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int classzone_idx, int migratetype)
{
	struct page *page;

	/* Acquire the per-zone oom lock for each zone */
	if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
		schedule_timeout_uninterruptible(1);
		return NULL;
	}

	/*
	 * PM-freezer should be notified that there might be an OOM killer on
	 * its way to kill and wake somebody up. This is too early and we might
	 * end up not killing anything but false positives are acceptable.
	 * See freeze_processes.
	 */
	note_oom_kill();

	/*
	 * Go through the zonelist yet one more time, keep very high watermark
	 * here, this is only to catch a parallel oom killing, we must fail if
	 * we're still under heavy pressure.
	 */
	page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask,
		order, zonelist, high_zoneidx,
		ALLOC_WMARK_HIGH|ALLOC_CPUSET,
		preferred_zone, classzone_idx, migratetype);
	if (page)
		goto out;

	if (!(gfp_mask & __GFP_NOFAIL)) {
		/* The OOM killer will not help higher order allocs */
		if (order > PAGE_ALLOC_COSTLY_ORDER)
			goto out;
		/* The OOM killer does not needlessly kill tasks for lowmem */
		if (high_zoneidx < ZONE_NORMAL)
			goto out;
		/*
		 * GFP_THISNODE contains __GFP_NORETRY and we never hit this.
		 * Sanity check for bare calls of __GFP_THISNODE, not real OOM.
		 * The caller should handle page allocation failure by itself if
		 * it specifies __GFP_THISNODE.
		 * Note: Hugepage uses it but will hit PAGE_ALLOC_COSTLY_ORDER.
		 */
		if (gfp_mask & __GFP_THISNODE)
			goto out;
	}
	/* Exhausted what can be done so it's blamo time */
	out_of_memory(zonelist, gfp_mask, order, nodemask, false);

out:
	oom_zonelist_unlock(zonelist, gfp_mask);
	return page;
}
#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int classzone_idx, int migratetype, enum migrate_mode mode,
	int *contended_compaction, bool *deferred_compaction)
{
	struct zone *last_compact_zone = NULL;
	unsigned long compact_result;
	struct page *page;

	if (!order)
		return NULL;

	current->flags |= PF_MEMALLOC;
	compact_result = try_to_compact_pages(zonelist, order, gfp_mask,
						nodemask, mode,
						contended_compaction,
						&last_compact_zone);
	current->flags &= ~PF_MEMALLOC;

	switch (compact_result) {
	case COMPACT_DEFERRED:
		*deferred_compaction = true;
		/* fall-through */
	case COMPACT_SKIPPED:
		return NULL;
	default:
		break;
	}

	/*
	 * At least in one zone compaction wasn't deferred or skipped, so let's
	 * count a compaction stall
	 */
	count_vm_event(COMPACTSTALL);

	/* Page migration frees to the PCP lists but we want merging */
	drain_pages(get_cpu());
	put_cpu();

	page = get_page_from_freelist(gfp_mask, nodemask,
			order, zonelist, high_zoneidx,
			alloc_flags & ~ALLOC_NO_WATERMARKS,
			preferred_zone, classzone_idx, migratetype);

	if (page) {
		struct zone *zone = page_zone(page);

		zone->compact_blockskip_flush = false;
		compaction_defer_reset(zone, order, true);
		count_vm_event(COMPACTSUCCESS);
		return page;
	}

	/*
	 * last_compact_zone is where try_to_compact_pages thought allocation
	 * should succeed, so it did not defer compaction. But here we know
	 * that it didn't succeed, so we do the defer.
	 */
	if (last_compact_zone && mode != MIGRATE_ASYNC)
		defer_compaction(last_compact_zone, order);

	/*
	 * It's bad if compaction run occurs and fails. The most likely reason
	 * is that pages exist, but not enough to satisfy watermarks.
	 */
	count_vm_event(COMPACTFAIL);

	cond_resched();

	return NULL;
}
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int classzone_idx, int migratetype, enum migrate_mode mode,
	int *contended_compaction, bool *deferred_compaction)
{
	return NULL;
}
#endif /* CONFIG_COMPACTION */
/* Perform direct synchronous page reclaim */
static int
__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
		  nodemask_t *nodemask)
{
	struct reclaim_state reclaim_state;
	int progress;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	current->flags |= PF_MEMALLOC;
	lockdep_set_current_reclaim_state(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	current->reclaim_state = &reclaim_state;

	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);

	current->reclaim_state = NULL;
	lockdep_clear_current_reclaim_state();
	current->flags &= ~PF_MEMALLOC;

	cond_resched();

	return progress;
}
/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
	int classzone_idx, int migratetype, unsigned long *did_some_progress)
{
	struct page *page = NULL;
	bool drained = false;

	*did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
					       nodemask);
	if (unlikely(!(*did_some_progress)))
		return NULL;

	/* After successful reclaim, reconsider all zones for allocation */
	if (IS_ENABLED(CONFIG_NUMA))
		zlc_clear_zones_full(zonelist);

retry:
	page = get_page_from_freelist(gfp_mask, nodemask, order,
					zonelist, high_zoneidx,
					alloc_flags & ~ALLOC_NO_WATERMARKS,
					preferred_zone, classzone_idx,
					migratetype);

	/*
	 * If an allocation failed after direct reclaim, it could be because
	 * pages are pinned on the per-cpu lists. Drain them and try again.
	 */
	if (!page && !drained) {
		drain_all_pages();
		drained = true;
		goto retry;
	}

	return page;
}
/*
 * This is called in the allocator slow-path if the allocation request is of
 * sufficient urgency to ignore watermarks and take other desperate measures
 */
static inline struct page *
__alloc_pages_high_priority(gfp_t gfp_mask, unsigned int order,
	struct zonelist *zonelist, enum zone_type high_zoneidx,
	nodemask_t *nodemask, struct zone *preferred_zone,
	int classzone_idx, int migratetype)
{
	struct page *page;

	do {
		page = get_page_from_freelist(gfp_mask, nodemask, order,
			zonelist, high_zoneidx, ALLOC_NO_WATERMARKS,
			preferred_zone, classzone_idx, migratetype);

		if (!page && gfp_mask & __GFP_NOFAIL)
			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
	} while (!page && (gfp_mask & __GFP_NOFAIL));

	return page;
}
static void wake_all_kswapds(unsigned int order,
			     struct zonelist *zonelist,
			     enum zone_type high_zoneidx,
			     struct zone *preferred_zone,
			     nodemask_t *nodemask)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						high_zoneidx, nodemask)
		wakeup_kswapd(zone, order, zone_idx(preferred_zone));
}
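
/*
 * Translate the gfp mask into the internal ALLOC_* flags used by the
 * allocator fast and slow paths: pick the starting watermark, decide how
 * hard to push into the reserves (ALLOC_HIGH/ALLOC_HARDER), whether
 * cpusets may be ignored, and whether watermarks may be ignored entirely
 * (ALLOC_NO_WATERMARKS).
 */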
static inline int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
	const bool atomic = !(gfp_mask & (__GFP_WAIT | __GFP_NO_KSWAPD));

	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (atomic == true) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);

	if (atomic) {
		/*
		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
		 * if it can't schedule.
		 */
		if (!(gfp_mask & __GFP_NOMEMALLOC))
			alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
		 * comment for __cpuset_node_allowed_softwall().
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(current)) && !in_interrupt())
		alloc_flags |= ALLOC_HARDER;

	if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
		if (gfp_mask & __GFP_MEMALLOC)
			alloc_flags |= ALLOC_NO_WATERMARKS;
		else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
			alloc_flags |= ALLOC_NO_WATERMARKS;
		else if (!in_interrupt() &&
				((current->flags & PF_MEMALLOC) ||
				 unlikely(test_thread_flag(TIF_MEMDIE))))
			alloc_flags |= ALLOC_NO_WATERMARKS;
	}
#ifdef CONFIG_CMA
	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	return alloc_flags;
}
bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
}
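
/*
 * Slow path of the allocator, entered when the initial zonelist scan
 * fails: wake kswapd, retry with relaxed flags, then escalate through
 * no-watermark allocation, direct compaction, direct reclaim, and
 * finally the OOM killer, honouring __GFP_NORETRY/__GFP_NOFAIL and the
 * THP-specific bail-outs along the way.
 */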
  2187. static inline struct page *
  2188. __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
  2189. struct zonelist *zonelist, enum zone_type high_zoneidx,
  2190. nodemask_t *nodemask, struct zone *preferred_zone,
  2191. int classzone_idx, int migratetype)
  2192. {
  2193. const gfp_t wait = gfp_mask & __GFP_WAIT;
  2194. struct page *page = NULL;
  2195. int alloc_flags;
  2196. unsigned long pages_reclaimed = 0;
  2197. unsigned long did_some_progress;
  2198. enum migrate_mode migration_mode = MIGRATE_ASYNC;
  2199. bool deferred_compaction = false;
  2200. int contended_compaction = COMPACT_CONTENDED_NONE;
  2201. /*
  2202. * In the slowpath, we sanity check order to avoid ever trying to
  2203. * reclaim >= MAX_ORDER areas which will never succeed. Callers may
  2204. * be using allocators in order of preference for an area that is
  2205. * too large.
  2206. */
  2207. if (order >= MAX_ORDER) {
  2208. WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
  2209. return NULL;
  2210. }
  2211. /*
  2212. * GFP_THISNODE (meaning __GFP_THISNODE, __GFP_NORETRY and
  2213. * __GFP_NOWARN set) should not cause reclaim since the subsystem
  2214. * (f.e. slab) using GFP_THISNODE may choose to trigger reclaim
  2215. * using a larger set of nodes after it has established that the
  2216. * allowed per node queues are empty and that nodes are
  2217. * over allocated.
  2218. */
  2219. if (IS_ENABLED(CONFIG_NUMA) &&
  2220. (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
  2221. goto nopage;
  2222. restart:
  2223. if (!(gfp_mask & __GFP_NO_KSWAPD))
  2224. wake_all_kswapds(order, zonelist, high_zoneidx,
  2225. preferred_zone, nodemask);
  2226. /*
  2227. * OK, we're below the kswapd watermark and have kicked background
  2228. * reclaim. Now things get more complex, so set up alloc_flags according
  2229. * to how we want to proceed.
  2230. */
  2231. alloc_flags = gfp_to_alloc_flags(gfp_mask);
  2232. /*
  2233. * Find the true preferred zone if the allocation is unconstrained by
  2234. * cpusets.
  2235. */
  2236. if (!(alloc_flags & ALLOC_CPUSET) && !nodemask) {
  2237. struct zoneref *preferred_zoneref;
  2238. preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
  2239. NULL, &preferred_zone);
  2240. classzone_idx = zonelist_zone_idx(preferred_zoneref);
  2241. }
  2242. rebalance:
  2243. /* This is the last chance, in general, before the goto nopage. */
  2244. page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
  2245. high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
  2246. preferred_zone, classzone_idx, migratetype);
  2247. if (page)
  2248. goto got_pg;
  2249. /* Allocate without watermarks if the context allows */
  2250. if (alloc_flags & ALLOC_NO_WATERMARKS) {
  2251. /*
  2252. * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
  2253. * the allocation is high priority and these type of
  2254. * allocations are system rather than user orientated
  2255. */
  2256. zonelist = node_zonelist(numa_node_id(), gfp_mask);
  2257. page = __alloc_pages_high_priority(gfp_mask, order,
  2258. zonelist, high_zoneidx, nodemask,
  2259. preferred_zone, classzone_idx, migratetype);
  2260. if (page) {
  2261. goto got_pg;
  2262. }
  2263. }
  2264. /* Atomic allocations - we can't balance anything */
  2265. if (!wait) {
  2266. /*
  2267. * All existing users of the deprecated __GFP_NOFAIL are
  2268. * blockable, so warn of any new users that actually allow this
  2269. * type of allocation to fail.
  2270. */
  2271. WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
  2272. goto nopage;
  2273. }
  2274. /* Avoid recursion of direct reclaim */
  2275. if (current->flags & PF_MEMALLOC)
  2276. goto nopage;
  2277. /* Avoid allocations with no watermarks from looping endlessly */
  2278. if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
  2279. goto nopage;
  2280. /*
  2281. * Try direct compaction. The first pass is asynchronous. Subsequent
  2282. * attempts after direct reclaim are synchronous
  2283. */
  2284. page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
  2285. high_zoneidx, nodemask, alloc_flags,
  2286. preferred_zone,
  2287. classzone_idx, migratetype,
  2288. migration_mode, &contended_compaction,
  2289. &deferred_compaction);
  2290. if (page)
  2291. goto got_pg;
  2292. /* Checks for THP-specific high-order allocations */
  2293. if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) {
  2294. /*
  2295. * If compaction is deferred for high-order allocations, it is
  2296. * because sync compaction recently failed. If this is the case
  2297. * and the caller requested a THP allocation, we do not want
  2298. * to heavily disrupt the system, so we fail the allocation
  2299. * instead of entering direct reclaim.
  2300. */
  2301. if (deferred_compaction)
  2302. goto nopage;
  2303. /*
  2304. * In all zones where compaction was attempted (and not
  2305. * deferred or skipped), lock contention has been detected.
  2306. * For THP allocation we do not want to disrupt the others
  2307. * so we fallback to base pages instead.
  2308. */
  2309. if (contended_compaction == COMPACT_CONTENDED_LOCK)
  2310. goto nopage;
  2311. /*
  2312. * If compaction was aborted due to need_resched(), we do not
  2313. * want to further increase allocation latency, unless it is
  2314. * khugepaged trying to collapse.
  2315. */
  2316. if (contended_compaction == COMPACT_CONTENDED_SCHED
  2317. && !(current->flags & PF_KTHREAD))
  2318. goto nopage;
  2319. }
  2320. /*
  2321. * It can become very expensive to allocate transparent hugepages at
  2322. * fault, so use asynchronous memory compaction for THP unless it is
  2323. * khugepaged trying to collapse.
  2324. */
  2325. if ((gfp_mask & GFP_TRANSHUGE) != GFP_TRANSHUGE ||
  2326. (current->flags & PF_KTHREAD))
  2327. migration_mode = MIGRATE_SYNC_LIGHT;
  2328. /* Try direct reclaim and then allocating */
  2329. page = __alloc_pages_direct_reclaim(gfp_mask, order,
  2330. zonelist, high_zoneidx,
  2331. nodemask,
  2332. alloc_flags, preferred_zone,
  2333. classzone_idx, migratetype,
  2334. &did_some_progress);
  2335. if (page)
  2336. goto got_pg;
  2337. /*
  2338. * If we failed to make any progress reclaiming, then we are
  2339. * running out of options and have to consider going OOM
  2340. */
  2341. if (!did_some_progress) {
  2342. if (oom_gfp_allowed(gfp_mask)) {
  2343. if (oom_killer_disabled)
  2344. goto nopage;
  2345. /* Coredumps can quickly deplete all memory reserves */
  2346. if ((current->flags & PF_DUMPCORE) &&
  2347. !(gfp_mask & __GFP_NOFAIL))
  2348. goto nopage;
  2349. page = __alloc_pages_may_oom(gfp_mask, order,
  2350. zonelist, high_zoneidx,
  2351. nodemask, preferred_zone,
  2352. classzone_idx, migratetype);
  2353. if (page)
  2354. goto got_pg;
  2355. if (!(gfp_mask & __GFP_NOFAIL)) {
  2356. /*
  2357. * The oom killer is not called for high-order
  2358. * allocations that may fail, so if no progress
  2359. * is being made, there are no other options and
  2360. * retrying is unlikely to help.
  2361. */
  2362. if (order > PAGE_ALLOC_COSTLY_ORDER)
  2363. goto nopage;
  2364. /*
  2365. * The oom killer is not called for lowmem
  2366. * allocations to prevent needlessly killing
  2367. * innocent tasks.
  2368. */
  2369. if (high_zoneidx < ZONE_NORMAL)
  2370. goto nopage;
  2371. }
  2372. goto restart;
  2373. }
  2374. }
  2375. /* Check if we should retry the allocation */
  2376. pages_reclaimed += did_some_progress;
  2377. if (should_alloc_retry(gfp_mask, order, did_some_progress,
  2378. pages_reclaimed)) {
  2379. /* Wait for some write requests to complete then retry */
  2380. wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
  2381. goto rebalance;
  2382. } else {
  2383. /*
2384. * High-order allocations do not necessarily loop after
2385. * direct reclaim, and reclaim/compaction depends on compaction
2386. * being called after reclaim, so call it directly if necessary
  2387. */
  2388. page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
  2389. high_zoneidx, nodemask, alloc_flags,
  2390. preferred_zone,
  2391. classzone_idx, migratetype,
  2392. migration_mode, &contended_compaction,
  2393. &deferred_compaction);
  2394. if (page)
  2395. goto got_pg;
  2396. }
  2397. nopage:
  2398. warn_alloc_failed(gfp_mask, order, NULL);
  2399. return page;
  2400. got_pg:
  2401. if (kmemcheck_enabled)
  2402. kmemcheck_pagealloc_alloc(page, order, gfp_mask);
  2403. return page;
  2404. }
  2405. /*
  2406. * This is the 'heart' of the zoned buddy allocator.
  2407. */
  2408. struct page *
  2409. __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
  2410. struct zonelist *zonelist, nodemask_t *nodemask)
  2411. {
  2412. enum zone_type high_zoneidx = gfp_zone(gfp_mask);
  2413. struct zone *preferred_zone;
  2414. struct zoneref *preferred_zoneref;
  2415. struct page *page = NULL;
  2416. int migratetype = gfpflags_to_migratetype(gfp_mask);
  2417. unsigned int cpuset_mems_cookie;
  2418. int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
  2419. int classzone_idx;
  2420. gfp_mask &= gfp_allowed_mask;
  2421. lockdep_trace_alloc(gfp_mask);
  2422. might_sleep_if(gfp_mask & __GFP_WAIT);
  2423. if (should_fail_alloc_page(gfp_mask, order))
  2424. return NULL;
  2425. /*
2426. * Check that the zones suitable for the gfp_mask contain at least one
2427. * valid zone. It's possible to have an empty zonelist as a result
2428. * of GFP_THISNODE and a memoryless node.
  2429. */
  2430. if (unlikely(!zonelist->_zonerefs->zone))
  2431. return NULL;
  2432. if (IS_ENABLED(CONFIG_CMA) && migratetype == MIGRATE_MOVABLE)
  2433. alloc_flags |= ALLOC_CMA;
  2434. retry_cpuset:
  2435. cpuset_mems_cookie = read_mems_allowed_begin();
  2436. /* The preferred zone is used for statistics later */
  2437. preferred_zoneref = first_zones_zonelist(zonelist, high_zoneidx,
  2438. nodemask ? : &cpuset_current_mems_allowed,
  2439. &preferred_zone);
  2440. if (!preferred_zone)
  2441. goto out;
  2442. classzone_idx = zonelist_zone_idx(preferred_zoneref);
  2443. /* First allocation attempt */
  2444. page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
  2445. zonelist, high_zoneidx, alloc_flags,
  2446. preferred_zone, classzone_idx, migratetype);
  2447. if (unlikely(!page)) {
  2448. /*
  2449. * Runtime PM, block IO and its error handling path
  2450. * can deadlock because I/O on the device might not
  2451. * complete.
  2452. */
  2453. gfp_mask = memalloc_noio_flags(gfp_mask);
  2454. page = __alloc_pages_slowpath(gfp_mask, order,
  2455. zonelist, high_zoneidx, nodemask,
  2456. preferred_zone, classzone_idx, migratetype);
  2457. }
  2458. trace_mm_page_alloc(page, order, gfp_mask, migratetype);
  2459. out:
  2460. /*
  2461. * When updating a task's mems_allowed, it is possible to race with
  2462. * parallel threads in such a way that an allocation can fail while
  2463. * the mask is being updated. If a page allocation is about to fail,
  2464. * check if the cpuset changed during allocation and if so, retry.
  2465. */
  2466. if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
  2467. goto retry_cpuset;
  2468. return page;
  2469. }
  2470. EXPORT_SYMBOL(__alloc_pages_nodemask);
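/*
 * Illustrative usage sketch (a typical caller pattern assumed for
 * clarity, not taken from this file): most callers reach
 * __alloc_pages_nodemask() through wrappers such as alloc_pages().
 * The order argument is log2 of the page count, so order 2 requests
 * 1 << 2 = 4 physically contiguous pages:
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 2);
 *
 *	if (!page)
 *		return -ENOMEM;
 *	... use page_address(page) for the kernel mapping ...
 *	__free_pages(page, 2);
 */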
  2471. /*
  2472. * Common helper functions.
  2473. */
  2474. unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
  2475. {
  2476. struct page *page;
  2477. /*
2478. * __get_free_pages() returns a kernel virtual address, which cannot
2479. * represent a highmem page
  2480. */
  2481. VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
  2482. page = alloc_pages(gfp_mask, order);
  2483. if (!page)
  2484. return 0;
  2485. return (unsigned long) page_address(page);
  2486. }
  2487. EXPORT_SYMBOL(__get_free_pages);
  2488. unsigned long get_zeroed_page(gfp_t gfp_mask)
  2489. {
  2490. return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
  2491. }
  2492. EXPORT_SYMBOL(get_zeroed_page);
  2493. void __free_pages(struct page *page, unsigned int order)
  2494. {
  2495. if (put_page_testzero(page)) {
  2496. if (order == 0)
  2497. free_hot_cold_page(page, false);
  2498. else
  2499. __free_pages_ok(page, order);
  2500. }
  2501. }
  2502. EXPORT_SYMBOL(__free_pages);
  2503. void free_pages(unsigned long addr, unsigned int order)
  2504. {
  2505. if (addr != 0) {
  2506. VM_BUG_ON(!virt_addr_valid((void *)addr));
  2507. __free_pages(virt_to_page((void *)addr), order);
  2508. }
  2509. }
  2510. EXPORT_SYMBOL(free_pages);
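/*
 * Illustrative usage sketch (assumed typical pattern): unlike alloc_pages(),
 * these helpers return a kernel virtual address rather than a struct page,
 * so they must not be used with __GFP_HIGHMEM and must be paired with
 * free_pages() at the same order:
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *
 *	if (!addr)
 *		return -ENOMEM;
 *	... use the zeroed page ...
 *	free_pages(addr, 0);
 */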
  2511. /*
  2512. * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
  2513. * of the current memory cgroup.
  2514. *
  2515. * It should be used when the caller would like to use kmalloc, but since the
  2516. * allocation is large, it has to fall back to the page allocator.
  2517. */
  2518. struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
  2519. {
  2520. struct page *page;
  2521. struct mem_cgroup *memcg = NULL;
  2522. if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
  2523. return NULL;
  2524. page = alloc_pages(gfp_mask, order);
  2525. memcg_kmem_commit_charge(page, memcg, order);
  2526. return page;
  2527. }
  2528. struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
  2529. {
  2530. struct page *page;
  2531. struct mem_cgroup *memcg = NULL;
  2532. if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
  2533. return NULL;
  2534. page = alloc_pages_node(nid, gfp_mask, order);
  2535. memcg_kmem_commit_charge(page, memcg, order);
  2536. return page;
  2537. }
  2538. /*
  2539. * __free_kmem_pages and free_kmem_pages will free pages allocated with
  2540. * alloc_kmem_pages.
  2541. */
  2542. void __free_kmem_pages(struct page *page, unsigned int order)
  2543. {
  2544. memcg_kmem_uncharge_pages(page, order);
  2545. __free_pages(page, order);
  2546. }
  2547. void free_kmem_pages(unsigned long addr, unsigned int order)
  2548. {
  2549. if (addr != 0) {
  2550. VM_BUG_ON(!virt_addr_valid((void *)addr));
  2551. __free_kmem_pages(virt_to_page((void *)addr), order);
  2552. }
  2553. }
  2554. static void *make_alloc_exact(unsigned long addr, unsigned order, size_t size)
  2555. {
  2556. if (addr) {
  2557. unsigned long alloc_end = addr + (PAGE_SIZE << order);
  2558. unsigned long used = addr + PAGE_ALIGN(size);
  2559. split_page(virt_to_page((void *)addr), order);
  2560. while (used < alloc_end) {
  2561. free_page(used);
  2562. used += PAGE_SIZE;
  2563. }
  2564. }
  2565. return (void *)addr;
  2566. }
  2567. /**
2568. * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
  2569. * @size: the number of bytes to allocate
  2570. * @gfp_mask: GFP flags for the allocation
  2571. *
  2572. * This function is similar to alloc_pages(), except that it allocates the
  2573. * minimum number of pages to satisfy the request. alloc_pages() can only
  2574. * allocate memory in power-of-two pages.
  2575. *
  2576. * This function is also limited by MAX_ORDER.
  2577. *
  2578. * Memory allocated by this function must be released by free_pages_exact().
  2579. */
  2580. void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
  2581. {
  2582. unsigned int order = get_order(size);
  2583. unsigned long addr;
  2584. addr = __get_free_pages(gfp_mask, order);
  2585. return make_alloc_exact(addr, order, size);
  2586. }
  2587. EXPORT_SYMBOL(alloc_pages_exact);
  2588. /**
  2589. * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
  2590. * pages on a node.
  2591. * @nid: the preferred node ID where memory should be allocated
  2592. * @size: the number of bytes to allocate
  2593. * @gfp_mask: GFP flags for the allocation
  2594. *
  2595. * Like alloc_pages_exact(), but try to allocate on node nid first before falling
  2596. * back.
2597. * Note this is not alloc_pages_exact_node(), which allocates on a specific
2598. * node but is not exact.
  2599. */
  2600. void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
  2601. {
  2602. unsigned order = get_order(size);
  2603. struct page *p = alloc_pages_node(nid, gfp_mask, order);
  2604. if (!p)
  2605. return NULL;
  2606. return make_alloc_exact((unsigned long)page_address(p), order, size);
  2607. }
  2608. /**
  2609. * free_pages_exact - release memory allocated via alloc_pages_exact()
  2610. * @virt: the value returned by alloc_pages_exact.
  2611. * @size: size of allocation, same value as passed to alloc_pages_exact().
  2612. *
  2613. * Release the memory allocated by a previous call to alloc_pages_exact.
  2614. */
  2615. void free_pages_exact(void *virt, size_t size)
  2616. {
  2617. unsigned long addr = (unsigned long)virt;
  2618. unsigned long end = addr + PAGE_ALIGN(size);
  2619. while (addr < end) {
  2620. free_page(addr);
  2621. addr += PAGE_SIZE;
  2622. }
  2623. }
  2624. EXPORT_SYMBOL(free_pages_exact);
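/*
 * Worked example (the size is assumed, for illustration only): for
 * size = 5 * PAGE_SIZE, get_order() rounds up to order 3 (8 pages).
 * make_alloc_exact() then split_page()s the order-3 block and frees the
 * three tail pages, so the caller keeps exactly 5 contiguous pages:
 *
 *	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages_exact(buf, 5 * PAGE_SIZE);
 */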
  2625. /**
  2626. * nr_free_zone_pages - count number of pages beyond high watermark
  2627. * @offset: The zone index of the highest zone
  2628. *
2629. * nr_free_zone_pages() counts the number of pages which are beyond the
  2630. * high watermark within all zones at or below a given zone index. For each
  2631. * zone, the number of pages is calculated as:
  2632. * managed_pages - high_pages
  2633. */
  2634. static unsigned long nr_free_zone_pages(int offset)
  2635. {
  2636. struct zoneref *z;
  2637. struct zone *zone;
  2638. /* Just pick one node, since fallback list is circular */
  2639. unsigned long sum = 0;
  2640. struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
  2641. for_each_zone_zonelist(zone, z, zonelist, offset) {
  2642. unsigned long size = zone->managed_pages;
  2643. unsigned long high = high_wmark_pages(zone);
  2644. if (size > high)
  2645. sum += size - high;
  2646. }
  2647. return sum;
  2648. }
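/*
 * Worked example (zone sizes assumed, for illustration only): with two
 * zones at or below @offset, one with managed_pages = 100000 and a high
 * watermark of 2000 pages, the other with managed_pages = 50000 and a
 * high watermark of 1000 pages, the result is
 * (100000 - 2000) + (50000 - 1000) = 147000 pages. A zone whose high
 * watermark exceeds its managed_pages contributes nothing.
 */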
  2649. /**
  2650. * nr_free_buffer_pages - count number of pages beyond high watermark
  2651. *
  2652. * nr_free_buffer_pages() counts the number of pages which are beyond the high
  2653. * watermark within ZONE_DMA and ZONE_NORMAL.
  2654. */
  2655. unsigned long nr_free_buffer_pages(void)
  2656. {
  2657. return nr_free_zone_pages(gfp_zone(GFP_USER));
  2658. }
  2659. EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
  2660. /**
  2661. * nr_free_pagecache_pages - count number of pages beyond high watermark
  2662. *
  2663. * nr_free_pagecache_pages() counts the number of pages which are beyond the
  2664. * high watermark within all zones.
  2665. */
  2666. unsigned long nr_free_pagecache_pages(void)
  2667. {
  2668. return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
  2669. }
  2670. static inline void show_node(struct zone *zone)
  2671. {
  2672. if (IS_ENABLED(CONFIG_NUMA))
  2673. printk("Node %d ", zone_to_nid(zone));
  2674. }
  2675. void si_meminfo(struct sysinfo *val)
  2676. {
  2677. val->totalram = totalram_pages;
  2678. val->sharedram = global_page_state(NR_SHMEM);
  2679. val->freeram = global_page_state(NR_FREE_PAGES);
  2680. val->bufferram = nr_blockdev_pages();
  2681. val->totalhigh = totalhigh_pages;
  2682. val->freehigh = nr_free_highpages();
  2683. val->mem_unit = PAGE_SIZE;
  2684. }
  2685. EXPORT_SYMBOL(si_meminfo);
  2686. #ifdef CONFIG_NUMA
  2687. void si_meminfo_node(struct sysinfo *val, int nid)
  2688. {
  2689. int zone_type; /* needs to be signed */
  2690. unsigned long managed_pages = 0;
  2691. pg_data_t *pgdat = NODE_DATA(nid);
  2692. for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
  2693. managed_pages += pgdat->node_zones[zone_type].managed_pages;
  2694. val->totalram = managed_pages;
  2695. val->sharedram = node_page_state(nid, NR_SHMEM);
  2696. val->freeram = node_page_state(nid, NR_FREE_PAGES);
  2697. #ifdef CONFIG_HIGHMEM
  2698. val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
  2699. val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
  2700. NR_FREE_PAGES);
  2701. #else
  2702. val->totalhigh = 0;
  2703. val->freehigh = 0;
  2704. #endif
  2705. val->mem_unit = PAGE_SIZE;
  2706. }
  2707. #endif
  2708. /*
  2709. * Determine whether the node should be displayed or not, depending on whether
  2710. * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
  2711. */
  2712. bool skip_free_areas_node(unsigned int flags, int nid)
  2713. {
  2714. bool ret = false;
  2715. unsigned int cpuset_mems_cookie;
  2716. if (!(flags & SHOW_MEM_FILTER_NODES))
  2717. goto out;
  2718. do {
  2719. cpuset_mems_cookie = read_mems_allowed_begin();
  2720. ret = !node_isset(nid, cpuset_current_mems_allowed);
  2721. } while (read_mems_allowed_retry(cpuset_mems_cookie));
  2722. out:
  2723. return ret;
  2724. }
  2725. #define K(x) ((x) << (PAGE_SHIFT-10))
  2726. static void show_migration_types(unsigned char type)
  2727. {
  2728. static const char types[MIGRATE_TYPES] = {
  2729. [MIGRATE_UNMOVABLE] = 'U',
  2730. [MIGRATE_RECLAIMABLE] = 'E',
  2731. [MIGRATE_MOVABLE] = 'M',
  2732. [MIGRATE_RESERVE] = 'R',
  2733. #ifdef CONFIG_CMA
  2734. [MIGRATE_CMA] = 'C',
  2735. #endif
  2736. #ifdef CONFIG_MEMORY_ISOLATION
  2737. [MIGRATE_ISOLATE] = 'I',
  2738. #endif
  2739. };
  2740. char tmp[MIGRATE_TYPES + 1];
  2741. char *p = tmp;
  2742. int i;
  2743. for (i = 0; i < MIGRATE_TYPES; i++) {
  2744. if (type & (1 << i))
  2745. *p++ = types[i];
  2746. }
  2747. *p = '\0';
  2748. printk("(%s) ", tmp);
  2749. }
  2750. /*
2751. * Show free area list (used inside shift+scroll-lock SysRq handling).
  2752. * We also calculate the percentage fragmentation. We do this by counting the
  2753. * memory on each free list with the exception of the first item on the list.
  2754. * Suppresses nodes that are not allowed by current's cpuset if
  2755. * SHOW_MEM_FILTER_NODES is passed.
  2756. */
  2757. void show_free_areas(unsigned int filter)
  2758. {
  2759. int cpu;
  2760. struct zone *zone;
  2761. for_each_populated_zone(zone) {
  2762. if (skip_free_areas_node(filter, zone_to_nid(zone)))
  2763. continue;
  2764. show_node(zone);
  2765. printk("%s per-cpu:\n", zone->name);
  2766. for_each_online_cpu(cpu) {
  2767. struct per_cpu_pageset *pageset;
  2768. pageset = per_cpu_ptr(zone->pageset, cpu);
  2769. printk("CPU %4d: hi:%5d, btch:%4d usd:%4d\n",
  2770. cpu, pageset->pcp.high,
  2771. pageset->pcp.batch, pageset->pcp.count);
  2772. }
  2773. }
  2774. printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
  2775. " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
  2776. " unevictable:%lu"
  2777. " dirty:%lu writeback:%lu unstable:%lu\n"
  2778. " free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
  2779. " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
  2780. " free_cma:%lu\n",
  2781. global_page_state(NR_ACTIVE_ANON),
  2782. global_page_state(NR_INACTIVE_ANON),
  2783. global_page_state(NR_ISOLATED_ANON),
  2784. global_page_state(NR_ACTIVE_FILE),
  2785. global_page_state(NR_INACTIVE_FILE),
  2786. global_page_state(NR_ISOLATED_FILE),
  2787. global_page_state(NR_UNEVICTABLE),
  2788. global_page_state(NR_FILE_DIRTY),
  2789. global_page_state(NR_WRITEBACK),
  2790. global_page_state(NR_UNSTABLE_NFS),
  2791. global_page_state(NR_FREE_PAGES),
  2792. global_page_state(NR_SLAB_RECLAIMABLE),
  2793. global_page_state(NR_SLAB_UNRECLAIMABLE),
  2794. global_page_state(NR_FILE_MAPPED),
  2795. global_page_state(NR_SHMEM),
  2796. global_page_state(NR_PAGETABLE),
  2797. global_page_state(NR_BOUNCE),
  2798. global_page_state(NR_FREE_CMA_PAGES));
  2799. for_each_populated_zone(zone) {
  2800. int i;
  2801. if (skip_free_areas_node(filter, zone_to_nid(zone)))
  2802. continue;
  2803. show_node(zone);
  2804. printk("%s"
  2805. " free:%lukB"
  2806. " min:%lukB"
  2807. " low:%lukB"
  2808. " high:%lukB"
  2809. " active_anon:%lukB"
  2810. " inactive_anon:%lukB"
  2811. " active_file:%lukB"
  2812. " inactive_file:%lukB"
  2813. " unevictable:%lukB"
  2814. " isolated(anon):%lukB"
  2815. " isolated(file):%lukB"
  2816. " present:%lukB"
  2817. " managed:%lukB"
  2818. " mlocked:%lukB"
  2819. " dirty:%lukB"
  2820. " writeback:%lukB"
  2821. " mapped:%lukB"
  2822. " shmem:%lukB"
  2823. " slab_reclaimable:%lukB"
  2824. " slab_unreclaimable:%lukB"
  2825. " kernel_stack:%lukB"
  2826. " pagetables:%lukB"
  2827. " unstable:%lukB"
  2828. " bounce:%lukB"
  2829. " free_cma:%lukB"
  2830. " writeback_tmp:%lukB"
  2831. " pages_scanned:%lu"
  2832. " all_unreclaimable? %s"
  2833. "\n",
  2834. zone->name,
  2835. K(zone_page_state(zone, NR_FREE_PAGES)),
  2836. K(min_wmark_pages(zone)),
  2837. K(low_wmark_pages(zone)),
  2838. K(high_wmark_pages(zone)),
  2839. K(zone_page_state(zone, NR_ACTIVE_ANON)),
  2840. K(zone_page_state(zone, NR_INACTIVE_ANON)),
  2841. K(zone_page_state(zone, NR_ACTIVE_FILE)),
  2842. K(zone_page_state(zone, NR_INACTIVE_FILE)),
  2843. K(zone_page_state(zone, NR_UNEVICTABLE)),
  2844. K(zone_page_state(zone, NR_ISOLATED_ANON)),
  2845. K(zone_page_state(zone, NR_ISOLATED_FILE)),
  2846. K(zone->present_pages),
  2847. K(zone->managed_pages),
  2848. K(zone_page_state(zone, NR_MLOCK)),
  2849. K(zone_page_state(zone, NR_FILE_DIRTY)),
  2850. K(zone_page_state(zone, NR_WRITEBACK)),
  2851. K(zone_page_state(zone, NR_FILE_MAPPED)),
  2852. K(zone_page_state(zone, NR_SHMEM)),
  2853. K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
  2854. K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
  2855. zone_page_state(zone, NR_KERNEL_STACK) *
  2856. THREAD_SIZE / 1024,
  2857. K(zone_page_state(zone, NR_PAGETABLE)),
  2858. K(zone_page_state(zone, NR_UNSTABLE_NFS)),
  2859. K(zone_page_state(zone, NR_BOUNCE)),
  2860. K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
  2861. K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
  2862. K(zone_page_state(zone, NR_PAGES_SCANNED)),
  2863. (!zone_reclaimable(zone) ? "yes" : "no")
  2864. );
  2865. printk("lowmem_reserve[]:");
  2866. for (i = 0; i < MAX_NR_ZONES; i++)
  2867. printk(" %ld", zone->lowmem_reserve[i]);
  2868. printk("\n");
  2869. }
  2870. for_each_populated_zone(zone) {
  2871. unsigned long nr[MAX_ORDER], flags, order, total = 0;
  2872. unsigned char types[MAX_ORDER];
  2873. if (skip_free_areas_node(filter, zone_to_nid(zone)))
  2874. continue;
  2875. show_node(zone);
  2876. printk("%s: ", zone->name);
  2877. spin_lock_irqsave(&zone->lock, flags);
  2878. for (order = 0; order < MAX_ORDER; order++) {
  2879. struct free_area *area = &zone->free_area[order];
  2880. int type;
  2881. nr[order] = area->nr_free;
  2882. total += nr[order] << order;
  2883. types[order] = 0;
  2884. for (type = 0; type < MIGRATE_TYPES; type++) {
  2885. if (!list_empty(&area->free_list[type]))
  2886. types[order] |= 1 << type;
  2887. }
  2888. }
  2889. spin_unlock_irqrestore(&zone->lock, flags);
  2890. for (order = 0; order < MAX_ORDER; order++) {
  2891. printk("%lu*%lukB ", nr[order], K(1UL) << order);
  2892. if (nr[order])
  2893. show_migration_types(types[order]);
  2894. }
  2895. printk("= %lukB\n", K(total));
  2896. }
  2897. hugetlb_show_meminfo();
  2898. printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
  2899. show_swap_cache_info();
  2900. }
  2901. static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
  2902. {
  2903. zoneref->zone = zone;
  2904. zoneref->zone_idx = zone_idx(zone);
  2905. }
  2906. /*
  2907. * Builds allocation fallback zone lists.
  2908. *
  2909. * Add all populated zones of a node to the zonelist.
  2910. */
  2911. static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
  2912. int nr_zones)
  2913. {
  2914. struct zone *zone;
  2915. enum zone_type zone_type = MAX_NR_ZONES;
  2916. do {
  2917. zone_type--;
  2918. zone = pgdat->node_zones + zone_type;
  2919. if (populated_zone(zone)) {
  2920. zoneref_set_zone(zone,
  2921. &zonelist->_zonerefs[nr_zones++]);
  2922. check_highest_zone(zone_type);
  2923. }
  2924. } while (zone_type);
  2925. return nr_zones;
  2926. }
  2927. /*
  2928. * zonelist_order:
  2929. * 0 = automatic detection of better ordering.
  2930. * 1 = order by ([node] distance, -zonetype)
  2931. * 2 = order by (-zonetype, [node] distance)
  2932. *
  2933. * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
  2934. * the same zonelist. So only NUMA can configure this param.
  2935. */
  2936. #define ZONELIST_ORDER_DEFAULT 0
  2937. #define ZONELIST_ORDER_NODE 1
  2938. #define ZONELIST_ORDER_ZONE 2
  2939. /* zonelist order in the kernel.
  2940. * set_zonelist_order() will set this to NODE or ZONE.
  2941. */
  2942. static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
  2943. static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
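/*
 * Illustrative example (a two-node layout is assumed): with nodes 0 and 1
 * each having a Normal and a DMA zone, node ordering builds node 0's
 * zonelist as
 *	Normal(0), DMA(0), Normal(1), DMA(1)
 * while zone ordering builds
 *	Normal(0), Normal(1), DMA(0), DMA(1)
 * i.e. zone ordering exhausts Normal memory system-wide before any DMA
 * zone is used.
 */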
  2944. #ifdef CONFIG_NUMA
2945. /* The value the user specified, possibly changed by the sysctl handler */
  2946. static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
  2947. /* string for sysctl */
  2948. #define NUMA_ZONELIST_ORDER_LEN 16
  2949. char numa_zonelist_order[16] = "default";
  2950. /*
2951. * Interface for configuring zonelist ordering.
2952. * Command line option "numa_zonelist_order"
2953. * = "[dD]efault" - default, automatic configuration
2954. * = "[nN]ode" - order by node locality, then by zone within node
2955. * = "[zZ]one" - order by zone, then by node locality within zone
  2956. */
  2957. static int __parse_numa_zonelist_order(char *s)
  2958. {
  2959. if (*s == 'd' || *s == 'D') {
  2960. user_zonelist_order = ZONELIST_ORDER_DEFAULT;
  2961. } else if (*s == 'n' || *s == 'N') {
  2962. user_zonelist_order = ZONELIST_ORDER_NODE;
  2963. } else if (*s == 'z' || *s == 'Z') {
  2964. user_zonelist_order = ZONELIST_ORDER_ZONE;
  2965. } else {
  2966. printk(KERN_WARNING
  2967. "Ignoring invalid numa_zonelist_order value: "
  2968. "%s\n", s);
  2969. return -EINVAL;
  2970. }
  2971. return 0;
  2972. }
  2973. static __init int setup_numa_zonelist_order(char *s)
  2974. {
  2975. int ret;
  2976. if (!s)
  2977. return 0;
  2978. ret = __parse_numa_zonelist_order(s);
  2979. if (ret == 0)
  2980. strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
  2981. return ret;
  2982. }
  2983. early_param("numa_zonelist_order", setup_numa_zonelist_order);
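/*
 * Usage sketch: the ordering can be selected at boot by appending, e.g.,
 *	numa_zonelist_order=zone
 * to the kernel command line, or changed at runtime through the sysctl
 * handler below, e.g.
 *	echo zone > /proc/sys/vm/numa_zonelist_order
 */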
  2984. /*
  2985. * sysctl handler for numa_zonelist_order
  2986. */
  2987. int numa_zonelist_order_handler(struct ctl_table *table, int write,
  2988. void __user *buffer, size_t *length,
  2989. loff_t *ppos)
  2990. {
  2991. char saved_string[NUMA_ZONELIST_ORDER_LEN];
  2992. int ret;
  2993. static DEFINE_MUTEX(zl_order_mutex);
  2994. mutex_lock(&zl_order_mutex);
  2995. if (write) {
  2996. if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
  2997. ret = -EINVAL;
  2998. goto out;
  2999. }
  3000. strcpy(saved_string, (char *)table->data);
  3001. }
  3002. ret = proc_dostring(table, write, buffer, length, ppos);
  3003. if (ret)
  3004. goto out;
  3005. if (write) {
  3006. int oldval = user_zonelist_order;
  3007. ret = __parse_numa_zonelist_order((char *)table->data);
  3008. if (ret) {
  3009. /*
  3010. * bogus value. restore saved string
  3011. */
  3012. strncpy((char *)table->data, saved_string,
  3013. NUMA_ZONELIST_ORDER_LEN);
  3014. user_zonelist_order = oldval;
  3015. } else if (oldval != user_zonelist_order) {
  3016. mutex_lock(&zonelists_mutex);
  3017. build_all_zonelists(NULL, NULL);
  3018. mutex_unlock(&zonelists_mutex);
  3019. }
  3020. }
  3021. out:
  3022. mutex_unlock(&zl_order_mutex);
  3023. return ret;
  3024. }
  3025. #define MAX_NODE_LOAD (nr_online_nodes)
  3026. static int node_load[MAX_NUMNODES];
  3027. /**
  3028. * find_next_best_node - find the next node that should appear in a given node's fallback list
  3029. * @node: node whose fallback list we're appending
  3030. * @used_node_mask: nodemask_t of already used nodes
  3031. *
  3032. * We use a number of factors to determine which is the next node that should
  3033. * appear on a given node's fallback list. The node should not have appeared
  3034. * already in @node's fallback list, and it should be the next closest node
  3035. * according to the distance array (which contains arbitrary distance values
  3036. * from each node to each node in the system), and should also prefer nodes
  3037. * with no CPUs, since presumably they'll have very little allocation pressure
  3038. * on them otherwise.
  3039. * It returns -1 if no node is found.
  3040. */
  3041. static int find_next_best_node(int node, nodemask_t *used_node_mask)
  3042. {
  3043. int n, val;
  3044. int min_val = INT_MAX;
  3045. int best_node = NUMA_NO_NODE;
  3046. const struct cpumask *tmp = cpumask_of_node(0);
  3047. /* Use the local node if we haven't already */
  3048. if (!node_isset(node, *used_node_mask)) {
  3049. node_set(node, *used_node_mask);
  3050. return node;
  3051. }
  3052. for_each_node_state(n, N_MEMORY) {
  3053. /* Don't want a node to appear more than once */
  3054. if (node_isset(n, *used_node_mask))
  3055. continue;
  3056. /* Use the distance array to find the distance */
  3057. val = node_distance(node, n);
  3058. /* Penalize nodes under us ("prefer the next node") */
  3059. val += (n < node);
  3060. /* Give preference to headless and unused nodes */
  3061. tmp = cpumask_of_node(n);
  3062. if (!cpumask_empty(tmp))
  3063. val += PENALTY_FOR_NODE_WITH_CPUS;
  3064. /* Slight preference for less loaded node */
  3065. val *= (MAX_NODE_LOAD*MAX_NUMNODES);
  3066. val += node_load[n];
  3067. if (val < min_val) {
  3068. min_val = val;
  3069. best_node = n;
  3070. }
  3071. }
  3072. if (best_node >= 0)
  3073. node_set(best_node, *used_node_mask);
  3074. return best_node;
  3075. }
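/*
 * Worked example (distances assumed, for illustration only): suppose node 0
 * is picking its next fallback between node 1 (distance 20, has CPUs) and
 * node 2 (distance 10, headless). The scores are
 *	node 1: (20 + PENALTY_FOR_NODE_WITH_CPUS) * MAX_NODE_LOAD*MAX_NUMNODES + node_load[1]
 *	node 2: 10 * MAX_NODE_LOAD*MAX_NUMNODES + node_load[2]
 * so node 2 wins with the smaller value. Scaling by
 * MAX_NODE_LOAD*MAX_NUMNODES guarantees that node_load[] only breaks ties
 * between otherwise equal distance scores.
 */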
  3076. /*
  3077. * Build zonelists ordered by node and zones within node.
  3078. * This results in maximum locality--normal zone overflows into local
  3079. * DMA zone, if any--but risks exhausting DMA zone.
  3080. */
  3081. static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
  3082. {
  3083. int j;
  3084. struct zonelist *zonelist;
  3085. zonelist = &pgdat->node_zonelists[0];
  3086. for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
  3087. ;
  3088. j = build_zonelists_node(NODE_DATA(node), zonelist, j);
  3089. zonelist->_zonerefs[j].zone = NULL;
  3090. zonelist->_zonerefs[j].zone_idx = 0;
  3091. }
  3092. /*
  3093. * Build gfp_thisnode zonelists
  3094. */
  3095. static void build_thisnode_zonelists(pg_data_t *pgdat)
  3096. {
  3097. int j;
  3098. struct zonelist *zonelist;
  3099. zonelist = &pgdat->node_zonelists[1];
  3100. j = build_zonelists_node(pgdat, zonelist, 0);
  3101. zonelist->_zonerefs[j].zone = NULL;
  3102. zonelist->_zonerefs[j].zone_idx = 0;
  3103. }
  3104. /*
  3105. * Build zonelists ordered by zone and nodes within zones.
  3106. * This results in conserving DMA zone[s] until all Normal memory is
  3107. * exhausted, but results in overflowing to remote node while memory
  3108. * may still exist in local DMA zone.
  3109. */
  3110. static int node_order[MAX_NUMNODES];
  3111. static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
  3112. {
  3113. int pos, j, node;
  3114. int zone_type; /* needs to be signed */
  3115. struct zone *z;
  3116. struct zonelist *zonelist;
  3117. zonelist = &pgdat->node_zonelists[0];
  3118. pos = 0;
  3119. for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
  3120. for (j = 0; j < nr_nodes; j++) {
  3121. node = node_order[j];
  3122. z = &NODE_DATA(node)->node_zones[zone_type];
  3123. if (populated_zone(z)) {
  3124. zoneref_set_zone(z,
  3125. &zonelist->_zonerefs[pos++]);
  3126. check_highest_zone(zone_type);
  3127. }
  3128. }
  3129. }
  3130. zonelist->_zonerefs[pos].zone = NULL;
  3131. zonelist->_zonerefs[pos].zone_idx = 0;
  3132. }
  3133. #if defined(CONFIG_64BIT)
  3134. /*
  3135. * Devices that require DMA32/DMA are relatively rare and do not justify a
  3136. * penalty to every machine in case the specialised case applies. Default
  3137. * to Node-ordering on 64-bit NUMA machines
  3138. */
  3139. static int default_zonelist_order(void)
  3140. {
  3141. return ZONELIST_ORDER_NODE;
  3142. }
  3143. #else
  3144. /*
  3145. * On 32-bit, the Normal zone needs to be preserved for allocations accessible
  3146. * by the kernel. If processes running on node 0 deplete the low memory zone
3147. * then reclaim will occur more frequently, increasing stalls and potentially
3148. * making it easier to OOM if a large percentage of the zone is under writeback or
  3149. * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
  3150. * Hence, default to zone ordering on 32-bit.
  3151. */
  3152. static int default_zonelist_order(void)
  3153. {
  3154. return ZONELIST_ORDER_ZONE;
  3155. }
  3156. #endif /* CONFIG_64BIT */
  3157. static void set_zonelist_order(void)
  3158. {
  3159. if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
  3160. current_zonelist_order = default_zonelist_order();
  3161. else
  3162. current_zonelist_order = user_zonelist_order;
  3163. }
  3164. static void build_zonelists(pg_data_t *pgdat)
  3165. {
  3166. int j, node, load;
  3167. enum zone_type i;
  3168. nodemask_t used_mask;
  3169. int local_node, prev_node;
  3170. struct zonelist *zonelist;
  3171. int order = current_zonelist_order;
  3172. /* initialize zonelists */
  3173. for (i = 0; i < MAX_ZONELISTS; i++) {
  3174. zonelist = pgdat->node_zonelists + i;
  3175. zonelist->_zonerefs[0].zone = NULL;
  3176. zonelist->_zonerefs[0].zone_idx = 0;
  3177. }
  3178. /* NUMA-aware ordering of nodes */
  3179. local_node = pgdat->node_id;
  3180. load = nr_online_nodes;
  3181. prev_node = local_node;
  3182. nodes_clear(used_mask);
  3183. memset(node_order, 0, sizeof(node_order));
  3184. j = 0;
  3185. while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
  3186. /*
  3187. * We don't want to pressure a particular node.
  3188. * So adding penalty to the first node in same
  3189. * distance group to make it round-robin.
  3190. */
  3191. if (node_distance(local_node, node) !=
  3192. node_distance(local_node, prev_node))
  3193. node_load[node] = load;
  3194. prev_node = node;
  3195. load--;
  3196. if (order == ZONELIST_ORDER_NODE)
  3197. build_zonelists_in_node_order(pgdat, node);
  3198. else
  3199. node_order[j++] = node; /* remember order */
  3200. }
  3201. if (order == ZONELIST_ORDER_ZONE) {
  3202. /* calculate node order -- i.e., DMA last! */
  3203. build_zonelists_in_zone_order(pgdat, j);
  3204. }
  3205. build_thisnode_zonelists(pgdat);
  3206. }
  3207. /* Construct the zonelist performance cache - see further mmzone.h */
  3208. static void build_zonelist_cache(pg_data_t *pgdat)
  3209. {
  3210. struct zonelist *zonelist;
  3211. struct zonelist_cache *zlc;
  3212. struct zoneref *z;
  3213. zonelist = &pgdat->node_zonelists[0];
  3214. zonelist->zlcache_ptr = zlc = &zonelist->zlcache;
  3215. bitmap_zero(zlc->fullzones, MAX_ZONES_PER_ZONELIST);
  3216. for (z = zonelist->_zonerefs; z->zone; z++)
  3217. zlc->z_to_n[z - zonelist->_zonerefs] = zonelist_node_idx(z);
  3218. }
  3219. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  3220. /*
  3221. * Return node id of node used for "local" allocations.
  3222. * I.e., first node id of first zone in arg node's generic zonelist.
  3223. * Used for initializing percpu 'numa_mem', which is used primarily
  3224. * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
  3225. */
  3226. int local_memory_node(int node)
  3227. {
  3228. struct zone *zone;
  3229. (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
  3230. gfp_zone(GFP_KERNEL),
  3231. NULL,
  3232. &zone);
  3233. return zone->node;
  3234. }
  3235. #endif
  3236. #else /* CONFIG_NUMA */
  3237. static void set_zonelist_order(void)
  3238. {
  3239. current_zonelist_order = ZONELIST_ORDER_ZONE;
  3240. }
  3241. static void build_zonelists(pg_data_t *pgdat)
  3242. {
  3243. int node, local_node;
  3244. enum zone_type j;
  3245. struct zonelist *zonelist;
  3246. local_node = pgdat->node_id;
  3247. zonelist = &pgdat->node_zonelists[0];
  3248. j = build_zonelists_node(pgdat, zonelist, 0);
  3249. /*
  3250. * Now we build the zonelist so that it contains the zones
  3251. * of all the other nodes.
  3252. * We don't want to pressure a particular node, so when
  3253. * building the zones for node N, we make sure that the
  3254. * zones coming right after the local ones are those from
  3255. * node N+1 (modulo N)
  3256. */
  3257. for (node = local_node + 1; node < MAX_NUMNODES; node++) {
  3258. if (!node_online(node))
  3259. continue;
  3260. j = build_zonelists_node(NODE_DATA(node), zonelist, j);
  3261. }
  3262. for (node = 0; node < local_node; node++) {
  3263. if (!node_online(node))
  3264. continue;
  3265. j = build_zonelists_node(NODE_DATA(node), zonelist, j);
  3266. }
  3267. zonelist->_zonerefs[j].zone = NULL;
  3268. zonelist->_zonerefs[j].zone_idx = 0;
  3269. }
  3270. /* non-NUMA variant of zonelist performance cache - just NULL zlcache_ptr */
  3271. static void build_zonelist_cache(pg_data_t *pgdat)
  3272. {
  3273. pgdat->node_zonelists[0].zlcache_ptr = NULL;
  3274. }
  3275. #endif /* CONFIG_NUMA */
  3276. /*
  3277. * Boot pageset table. One per cpu which is going to be used for all
  3278. * zones and all nodes. The parameters will be set in such a way
  3279. * that an item put on a list will immediately be handed over to
  3280. * the buddy list. This is safe since pageset manipulation is done
  3281. * with interrupts disabled.
  3282. *
  3283. * The boot_pagesets must be kept even after bootup is complete for
  3284. * unused processors and/or zones. They do play a role for bootstrapping
  3285. * hotplugged processors.
  3286. *
  3287. * zoneinfo_show() and maybe other functions do
  3288. * not check if the processor is online before following the pageset pointer.
  3289. * Other parts of the kernel may not check if the zone is available.
  3290. */
  3291. static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
  3292. static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
  3293. static void setup_zone_pageset(struct zone *zone);
  3294. /*
  3295. * Global mutex to protect against size modification of zonelists
  3296. * as well as to serialize pageset setup for the new populated zone.
  3297. */
  3298. DEFINE_MUTEX(zonelists_mutex);
3299. /* The return value is int just to satisfy stop_machine() */
  3300. static int __build_all_zonelists(void *data)
  3301. {
  3302. int nid;
  3303. int cpu;
  3304. pg_data_t *self = data;
  3305. #ifdef CONFIG_NUMA
  3306. memset(node_load, 0, sizeof(node_load));
  3307. #endif
  3308. if (self && !node_online(self->node_id)) {
  3309. build_zonelists(self);
  3310. build_zonelist_cache(self);
  3311. }
  3312. for_each_online_node(nid) {
  3313. pg_data_t *pgdat = NODE_DATA(nid);
  3314. build_zonelists(pgdat);
  3315. build_zonelist_cache(pgdat);
  3316. }
  3317. /*
  3318. * Initialize the boot_pagesets that are going to be used
  3319. * for bootstrapping processors. The real pagesets for
  3320. * each zone will be allocated later when the per cpu
  3321. * allocator is available.
  3322. *
  3323. * boot_pagesets are used also for bootstrapping offline
  3324. * cpus if the system is already booted because the pagesets
  3325. * are needed to initialize allocators on a specific cpu too.
  3326. * F.e. the percpu allocator needs the page allocator which
  3327. * needs the percpu allocator in order to allocate its pagesets
  3328. * (a chicken-egg dilemma).
  3329. */
  3330. for_each_possible_cpu(cpu) {
  3331. setup_pageset(&per_cpu(boot_pageset, cpu), 0);
  3332. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  3333. /*
  3334. * We now know the "local memory node" for each node--
  3335. * i.e., the node of the first zone in the generic zonelist.
  3336. * Set up numa_mem percpu variable for on-line cpus. During
  3337. * boot, only the boot cpu should be on-line; we'll init the
  3338. * secondary cpus' numa_mem as they come on-line. During
  3339. * node/memory hotplug, we'll fixup all on-line cpus.
  3340. */
  3341. if (cpu_online(cpu))
  3342. set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
  3343. #endif
  3344. }
  3345. return 0;
  3346. }
  3347. /*
  3348. * Called with zonelists_mutex held always
  3349. * unless system_state == SYSTEM_BOOTING.
  3350. */
  3351. void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
  3352. {
  3353. set_zonelist_order();
  3354. if (system_state == SYSTEM_BOOTING) {
  3355. __build_all_zonelists(NULL);
  3356. mminit_verify_zonelist();
  3357. cpuset_init_current_mems_allowed();
  3358. } else {
  3359. #ifdef CONFIG_MEMORY_HOTPLUG
  3360. if (zone)
  3361. setup_zone_pageset(zone);
  3362. #endif
  3363. /* we have to stop all cpus to guarantee there is no user
  3364. of zonelist */
  3365. stop_machine(__build_all_zonelists, pgdat, NULL);
  3366. /* cpuset refresh routine should be here */
  3367. }
  3368. vm_total_pages = nr_free_pagecache_pages();
  3369. /*
  3370. * Disable grouping by mobility if the number of pages in the
  3371. * system is too low to allow the mechanism to work. It would be
  3372. * more accurate, but expensive to check per-zone. This check is
  3373. * made on memory-hotadd so a system can start with mobility
  3374. * disabled and enable it later
  3375. */
  3376. if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
  3377. page_group_by_mobility_disabled = 1;
  3378. else
  3379. page_group_by_mobility_disabled = 0;
  3380. printk("Built %i zonelists in %s order, mobility grouping %s. "
  3381. "Total pages: %ld\n",
  3382. nr_online_nodes,
  3383. zonelist_order_name[current_zonelist_order],
  3384. page_group_by_mobility_disabled ? "off" : "on",
  3385. vm_total_pages);
  3386. #ifdef CONFIG_NUMA
  3387. printk("Policy zone: %s\n", zone_names[policy_zone]);
  3388. #endif
  3389. }
  3390. /*
  3391. * Helper functions to size the waitqueue hash table.
  3392. * Essentially these want to choose hash table sizes sufficiently
  3393. * large so that collisions trying to wait on pages are rare.
  3394. * But in fact, the number of active page waitqueues on typical
  3395. * systems is ridiculously low, less than 200. So this is even
  3396. * conservative, even though it seems large.
  3397. *
  3398. * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
  3399. * waitqueues, i.e. the size of the waitq table given the number of pages.
  3400. */
  3401. #define PAGES_PER_WAITQUEUE 256
  3402. #ifndef CONFIG_MEMORY_HOTPLUG
  3403. static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
  3404. {
  3405. unsigned long size = 1;
  3406. pages /= PAGES_PER_WAITQUEUE;
  3407. while (size < pages)
  3408. size <<= 1;
  3409. /*
  3410. * Once we have dozens or even hundreds of threads sleeping
  3411. * on IO we've got bigger problems than wait queue collision.
  3412. * Limit the size of the wait table to a reasonable size.
  3413. */
  3414. size = min(size, 4096UL);
  3415. return max(size, 4UL);
  3416. }
  3417. #else
  3418. /*
  3419. * A zone's size might be changed by hot-add, so it is not possible to determine
  3420. * a suitable size for its wait_table. So we use the maximum size now.
  3421. *
  3422. * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
  3423. *
  3424. * i386 (preemption config) : 4096 x 16 = 64Kbyte.
  3425. * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
  3426. * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
  3427. *
  3428. * The maximum entries are prepared when a zone's memory is (512K + 256) pages
  3429. * or more by the traditional way. (See above). It equals:
  3430. *
  3431. * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
  3432. * ia64(16K page size) : = ( 8G + 4M)byte.
  3433. * powerpc (64K page size) : = (32G +16M)byte.
  3434. */
  3435. static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
  3436. {
  3437. return 4096UL;
  3438. }
  3439. #endif
  3440. /*
  3441. * This is an integer logarithm so that shifts can be used later
  3442. * to extract the more random high bits from the multiplicative
  3443. * hash function before the remainder is taken.
  3444. */
  3445. static inline unsigned long wait_table_bits(unsigned long size)
  3446. {
  3447. return ffz(~size);
  3448. }
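/*
 * Worked example (zone size assumed, for illustration only): for a zone of
 * 1,000,000 pages, 1000000 / PAGES_PER_WAITQUEUE = 3906, which the
 * doubling loop rounds up to 4096 (also the cap). wait_table_bits(4096) is
 * then ffz(~4096) = 12, the shift width later used to extract hash bits
 * when picking a waitqueue.
 */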
  3449. /*
  3450. * Check if a pageblock contains reserved pages
  3451. */
  3452. static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
  3453. {
  3454. unsigned long pfn;
  3455. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  3456. if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
  3457. return 1;
  3458. }
  3459. return 0;
  3460. }
  3461. /*
  3462. * Mark a number of pageblocks as MIGRATE_RESERVE. The number
  3463. * of blocks reserved is based on min_wmark_pages(zone). The memory within
  3464. * the reserve will tend to store contiguous free pages. Setting min_free_kbytes
  3465. * higher will lead to a bigger reserve which will get freed as contiguous
  3466. * blocks as reclaim kicks in
  3467. */
  3468. static void setup_zone_migrate_reserve(struct zone *zone)
  3469. {
  3470. unsigned long start_pfn, pfn, end_pfn, block_end_pfn;
  3471. struct page *page;
  3472. unsigned long block_migratetype;
  3473. int reserve;
  3474. int old_reserve;
  3475. /*
  3476. * Get the start pfn, end pfn and the number of blocks to reserve
  3477. * We have to be careful to be aligned to pageblock_nr_pages to
  3478. * make sure that we always check pfn_valid for the first page in
  3479. * the block.
  3480. */
  3481. start_pfn = zone->zone_start_pfn;
  3482. end_pfn = zone_end_pfn(zone);
  3483. start_pfn = roundup(start_pfn, pageblock_nr_pages);
  3484. reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
  3485. pageblock_order;
  3486. /*
  3487. * Reserve blocks are generally in place to help high-order atomic
  3488. * allocations that are short-lived. A min_free_kbytes value that
  3489. * would result in more than 2 reserve blocks for atomic allocations
  3490. * is assumed to be in place to help anti-fragmentation for the
  3491. * future allocation of hugepages at runtime.
  3492. */
  3493. reserve = min(2, reserve);
  3494. old_reserve = zone->nr_migrate_reserve_block;
3495. /* On memory hot-add, we almost always need to do nothing */
  3496. if (reserve == old_reserve)
  3497. return;
  3498. zone->nr_migrate_reserve_block = reserve;
  3499. for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
  3500. if (!pfn_valid(pfn))
  3501. continue;
  3502. page = pfn_to_page(pfn);
  3503. /* Watch out for overlapping nodes */
  3504. if (page_to_nid(page) != zone_to_nid(zone))
  3505. continue;
  3506. block_migratetype = get_pageblock_migratetype(page);
  3507. /* Only test what is necessary when the reserves are not met */
  3508. if (reserve > 0) {
  3509. /*
  3510. * Blocks with reserved pages will never free, skip
  3511. * them.
  3512. */
  3513. block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
  3514. if (pageblock_is_reserved(pfn, block_end_pfn))
  3515. continue;
  3516. /* If this block is reserved, account for it */
  3517. if (block_migratetype == MIGRATE_RESERVE) {
  3518. reserve--;
  3519. continue;
  3520. }
  3521. /* Suitable for reserving if this block is movable */
  3522. if (block_migratetype == MIGRATE_MOVABLE) {
  3523. set_pageblock_migratetype(page,
  3524. MIGRATE_RESERVE);
  3525. move_freepages_block(zone, page,
  3526. MIGRATE_RESERVE);
  3527. reserve--;
  3528. continue;
  3529. }
  3530. } else if (!old_reserve) {
  3531. /*
  3532. * At boot time we don't need to scan the whole zone
  3533. * for turning off MIGRATE_RESERVE.
  3534. */
  3535. break;
  3536. }
  3537. /*
  3538. * If the reserve is met and this is a previous reserved block,
  3539. * take it back
  3540. */
  3541. if (block_migratetype == MIGRATE_RESERVE) {
  3542. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  3543. move_freepages_block(zone, page, MIGRATE_MOVABLE);
  3544. }
  3545. }
  3546. }
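/*
 * Worked example (watermark assumed, for illustration only): with
 * min_wmark_pages(zone) = 5000 and pageblock_nr_pages = 512
 * (pageblock_order = 9, as on x86 with 2MB hugepages),
 * roundup(5000, 512) = 5120 and 5120 >> 9 = 10 blocks, which the
 * min(2, reserve) clamp caps at 2 MIGRATE_RESERVE pageblocks.
 */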
  3547. /*
  3548. * Initially all pages are reserved - free ones are freed
  3549. * up by free_all_bootmem() once the early boot process is
  3550. * done. Non-atomic initialization, single-pass.
  3551. */
  3552. void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
  3553. unsigned long start_pfn, enum memmap_context context)
  3554. {
  3555. struct page *page;
  3556. unsigned long end_pfn = start_pfn + size;
  3557. unsigned long pfn;
  3558. struct zone *z;
  3559. if (highest_memmap_pfn < end_pfn - 1)
  3560. highest_memmap_pfn = end_pfn - 1;
  3561. z = &NODE_DATA(nid)->node_zones[zone];
  3562. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  3563. /*
  3564. * There can be holes in boot-time mem_map[]s
  3565. * handed to this function. They do not
  3566. * exist on hotplugged memory.
  3567. */
  3568. if (context == MEMMAP_EARLY) {
  3569. if (!early_pfn_valid(pfn))
  3570. continue;
  3571. if (!early_pfn_in_nid(pfn, nid))
  3572. continue;
  3573. }
  3574. page = pfn_to_page(pfn);
  3575. set_page_links(page, zone, nid, pfn);
  3576. mminit_verify_page_links(page, zone, nid, pfn);
  3577. init_page_count(page);
  3578. page_mapcount_reset(page);
  3579. page_cpupid_reset_last(page);
  3580. SetPageReserved(page);
  3581. /*
  3582. * Mark the block movable so that blocks are reserved for
  3583. * movable at startup. This will force kernel allocations
  3584. * to reserve their blocks rather than leaking throughout
  3585. * the address space during boot when many long-lived
  3586. * kernel allocations are made. Later some blocks near
  3587. * the start are marked MIGRATE_RESERVE by
  3588. * setup_zone_migrate_reserve()
  3589. *
3590. * The bitmap is created for the zone's valid pfn range, but the memmap
3591. * can be created for invalid pages (for alignment), so
3592. * check here that we do not call set_pageblock_migratetype() against a
3593. * pfn outside the zone.
  3594. */
  3595. if ((z->zone_start_pfn <= pfn)
  3596. && (pfn < zone_end_pfn(z))
  3597. && !(pfn & (pageblock_nr_pages - 1)))
  3598. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  3599. INIT_LIST_HEAD(&page->lru);
  3600. #ifdef WANT_PAGE_VIRTUAL
  3601. /* The shift won't overflow because ZONE_NORMAL is below 4G. */
  3602. if (!is_highmem_idx(zone))
  3603. set_page_address(page, __va(pfn << PAGE_SHIFT));
  3604. #endif
  3605. }
  3606. }
  3607. static void __meminit zone_init_free_lists(struct zone *zone)
  3608. {
  3609. unsigned int order, t;
  3610. for_each_migratetype_order(order, t) {
  3611. INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
  3612. zone->free_area[order].nr_free = 0;
  3613. }
  3614. }
  3615. #ifndef __HAVE_ARCH_MEMMAP_INIT
  3616. #define memmap_init(size, nid, zone, start_pfn) \
  3617. memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
  3618. #endif
  3619. static int zone_batchsize(struct zone *zone)
  3620. {
  3621. #ifdef CONFIG_MMU
  3622. int batch;
  3623. /*
  3624. * The per-cpu-pages pools are set to around 1000th of the
  3625. * size of the zone. But no more than 1/2 of a meg.
  3626. *
  3627. * OK, so we don't know how big the cache is. So guess.
  3628. */
  3629. batch = zone->managed_pages / 1024;
  3630. if (batch * PAGE_SIZE > 512 * 1024)
  3631. batch = (512 * 1024) / PAGE_SIZE;
  3632. batch /= 4; /* We effectively *= 4 below */
  3633. if (batch < 1)
  3634. batch = 1;
  3635. /*
  3636. * Clamp the batch to a 2^n - 1 value. Having a power
  3637. * of 2 value was found to be more likely to have
  3638. * suboptimal cache aliasing properties in some cases.
  3639. *
  3640. * For example if 2 tasks are alternately allocating
  3641. * batches of pages, one task can end up with a lot
  3642. * of pages of one half of the possible page colors
  3643. * and the other with pages of the other colors.
  3644. */
  3645. batch = rounddown_pow_of_two(batch + batch/2) - 1;
  3646. return batch;
  3647. #else
  3648. /* The deferral and batching of frees should be suppressed under NOMMU
  3649. * conditions.
  3650. *
  3651. * The problem is that NOMMU needs to be able to allocate large chunks
  3652. * of contiguous memory as there's no hardware page translation to
  3653. * assemble apparent contiguous memory from discontiguous pages.
  3654. *
  3655. * Queueing large contiguous runs of pages for batching, however,
  3656. * causes the pages to actually be freed in smaller chunks. As there
  3657. * can be a significant delay between the individual batches being
  3658. * recycled, this leads to the once large chunks of space being
  3659. * fragmented and becoming unavailable for high-order allocations.
  3660. */
  3661. return 0;
  3662. #endif
  3663. }
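/*
 * Worked example (zone size assumed, for illustration only): for a zone
 * with managed_pages = 262144 (1GB of 4KB pages), 262144 / 1024 = 256,
 * which the half-megabyte cap clamps to (512 * 1024) / 4096 = 128; dividing
 * by 4 gives 32, and rounddown_pow_of_two(32 + 16) - 1 = 31, so the
 * per-cpu batch ends up at 31 pages.
 */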
  3664. /*
  3665. * pcp->high and pcp->batch values are related and dependent on one another:
3666. * ->batch must never be higher than ->high.
  3667. * The following function updates them in a safe manner without read side
  3668. * locking.
  3669. *
  3670. * Any new users of pcp->batch and pcp->high should ensure they can cope with
3671. * those fields changing asynchronously (according to the above rule).
  3672. *
  3673. * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
  3674. * outside of boot time (or some other assurance that no concurrent updaters
  3675. * exist).
  3676. */
  3677. static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
  3678. unsigned long batch)
  3679. {
  3680. /* start with a fail safe value for batch */
  3681. pcp->batch = 1;
  3682. smp_wmb();
  3683. /* Update high, then batch, in order */
  3684. pcp->high = high;
  3685. smp_wmb();
  3686. pcp->batch = batch;
  3687. }
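/*
 * Illustrative reader-side sketch (assumed pattern, not a verbatim caller):
 * a free path may observe the fields mid-update, roughly
 *
 *	if (pcp->count >= pcp->high)
 *		free_pcppages_bulk(zone, pcp->batch, pcp);
 *
 * The store order above (batch = 1, then high, then the final batch)
 * ensures such a reader can never see a batch larger than the current
 * high, only a temporarily conservative batch of 1.
 */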
  3688. /* a companion to pageset_set_high() */
  3689. static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
  3690. {
  3691. pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
  3692. }
  3693. static void pageset_init(struct per_cpu_pageset *p)
  3694. {
  3695. struct per_cpu_pages *pcp;
  3696. int migratetype;
  3697. memset(p, 0, sizeof(*p));
  3698. pcp = &p->pcp;
  3699. pcp->count = 0;
  3700. for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
  3701. INIT_LIST_HEAD(&pcp->lists[migratetype]);
  3702. }
  3703. static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
  3704. {
  3705. pageset_init(p);
  3706. pageset_set_batch(p, batch);
  3707. }
  3708. /*
  3709. * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
  3710. * to the value high for the pageset p.
  3711. */
  3712. static void pageset_set_high(struct per_cpu_pageset *p,
  3713. unsigned long high)
  3714. {
  3715. unsigned long batch = max(1UL, high / 4);
  3716. if ((high / 4) > (PAGE_SHIFT * 8))
  3717. batch = PAGE_SHIFT * 8;
  3718. pageset_update(&p->pcp, high, batch);
  3719. }
  3720. static void pageset_set_high_and_batch(struct zone *zone,
  3721. struct per_cpu_pageset *pcp)
  3722. {
  3723. if (percpu_pagelist_fraction)
  3724. pageset_set_high(pcp,
  3725. (zone->managed_pages /
  3726. percpu_pagelist_fraction));
  3727. else
  3728. pageset_set_batch(pcp, zone_batchsize(zone));
  3729. }
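/*
 * Worked example (tunable values assumed, for illustration only): with
 * percpu_pagelist_fraction = 8 and managed_pages = 262144, high becomes
 * 262144 / 8 = 32768; since high / 4 = 8192 exceeds PAGE_SHIFT * 8 = 96
 * (with 4KB pages), the batch is clamped to 96. When the fraction is
 * unset, the zone_batchsize() heuristic above is used instead.
 */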
  3730. static void __meminit zone_pageset_init(struct zone *zone, int cpu)
  3731. {
  3732. struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
  3733. pageset_init(pcp);
  3734. pageset_set_high_and_batch(zone, pcp);
  3735. }
  3736. static void __meminit setup_zone_pageset(struct zone *zone)
  3737. {
  3738. int cpu;
  3739. zone->pageset = alloc_percpu(struct per_cpu_pageset);
  3740. for_each_possible_cpu(cpu)
  3741. zone_pageset_init(zone, cpu);
  3742. }
  3743. /*
  3744. * Allocate per cpu pagesets and initialize them.
  3745. * Before this call only boot pagesets were available.
  3746. */
  3747. void __init setup_per_cpu_pageset(void)
  3748. {
  3749. struct zone *zone;
  3750. for_each_populated_zone(zone)
  3751. setup_zone_pageset(zone);
  3752. }
static noinline __init_refok
int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
{
	int i;
	size_t alloc_size;

	/*
	 * The per-page waitqueue mechanism uses hashed waitqueues
	 * per zone.
	 */
	zone->wait_table_hash_nr_entries =
		 wait_table_hash_nr_entries(zone_size_pages);
	zone->wait_table_bits =
		 wait_table_bits(zone->wait_table_hash_nr_entries);
	alloc_size = zone->wait_table_hash_nr_entries
					* sizeof(wait_queue_head_t);

	if (!slab_is_available()) {
		zone->wait_table = (wait_queue_head_t *)
			memblock_virt_alloc_node_nopanic(
				alloc_size, zone->zone_pgdat->node_id);
	} else {
		/*
		 * This case means that a zone whose size was 0 gets new memory
		 * via memory hot-add.
		 * But it may be the case that a new node was hot-added.  In
		 * this case vmalloc() will not be able to use this new node's
		 * memory - this wait_table must be initialized to use this new
		 * node itself as well.
		 * To use this new node's memory, further consideration will be
		 * necessary.
		 */
		zone->wait_table = vmalloc(alloc_size);
	}
	if (!zone->wait_table)
		return -ENOMEM;

	for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
		init_waitqueue_head(zone->wait_table + i);

	return 0;
}
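/*
 * Rough sizing example, assuming PAGES_PER_WAITQUEUE == 256: a 1GiB
 * zone of 262144 4KiB pages hashes to about 262144 / 256 == 1024
 * waitqueue heads, so alloc_size is 1024 * sizeof(wait_queue_head_t).
 */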
static __meminit void zone_pcp_init(struct zone *zone)
{
	/*
	 * per cpu subsystem is not up at this point. The following code
	 * relies on the ability of the linker to provide the
	 * offset of a (static) per cpu variable into the per cpu area.
	 */
	zone->pageset = &boot_pageset;

	if (populated_zone(zone))
		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
			zone->name, zone->present_pages,
					 zone_batchsize(zone));
}
int __meminit init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size,
					enum memmap_context context)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int ret;

	ret = zone_wait_table_init(zone, size);
	if (ret)
		return ret;
	pgdat->nr_zones = zone_idx(zone) + 1;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);

	return 0;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
int __meminit __early_pfn_to_nid(unsigned long pfn)
{
	unsigned long start_pfn, end_pfn;
	int nid;
	/*
	 * NOTE: The following SMP-unsafe globals are only used early in boot
	 * when the kernel is running single-threaded.
	 */
	static unsigned long __meminitdata last_start_pfn, last_end_pfn;
	static int __meminitdata last_nid;

	if (last_start_pfn <= pfn && pfn < last_end_pfn)
		return last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != -1) {
		last_start_pfn = start_pfn;
		last_end_pfn = end_pfn;
		last_nid = nid;
	}

	return nid;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	int nid;

	nid = __early_pfn_to_nid(pfn);
	if (nid >= 0)
		return nid;
	/* just returns 0 */
	return 0;
}

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
	int nid;

	nid = __early_pfn_to_nid(pfn);
	if (nid >= 0 && nid != node)
		return false;
	return true;
}
#endif
/**
 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
 *
 * If an architecture guarantees that all ranges registered contain no holes
 * and may be freed, this function may be used instead of calling
 * memblock_free_early_nid() manually.
 */
void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, this_nid;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
		start_pfn = min(start_pfn, max_low_pfn);
		end_pfn = min(end_pfn, max_low_pfn);

		if (start_pfn < end_pfn)
			memblock_free_early_nid(PFN_PHYS(start_pfn),
					(end_pfn - start_pfn) << PAGE_SHIFT,
					this_nid);
	}
}
/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 *
 * If an architecture guarantees that all ranges registered contain no holes and may
 * be freed, this function may be used instead of calling memory_present() manually.
 */
void __init sparse_memory_present_with_active_regions(int nid)
{
	unsigned long start_pfn, end_pfn;
	int i, this_nid;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
		memory_present(this_nid, start_pfn, end_pfn);
}
/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by memblock_set_node(). If called for a node
 * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
 */
void __meminit get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	unsigned long this_start_pfn, this_end_pfn;
	int i;

	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
		*start_pfn = min(*start_pfn, this_start_pfn);
		*end_pfn = max(*end_pfn, this_end_pfn);
	}

	if (*start_pfn == -1UL)
		*start_pfn = 0;
}
/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in
 * monotonically increasing memory addresses so that the "highest"
 * populated zone is used.
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;

	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}
/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independently of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory
 * addresses.
 */
static void __meminit adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (*zone_start_pfn < zone_movable_pfn[nid] &&
				*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}
/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *ignored)
{
	unsigned long zone_start_pfn, zone_end_pfn;

	/* Get the start and end of the zone */
	zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
	zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
	adjust_zone_range_for_zone_movable(nid, zone_type,
				node_start_pfn, node_end_pfn,
				&zone_start_pfn, &zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (zone_end_pfn < node_start_pfn || zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	zone_end_pfn = min(zone_end_pfn, node_end_pfn);
	zone_start_pfn = max(zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return zone_end_pfn - zone_start_pfn;
}
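/*
 * Clamping example: if a node spans PFNs [1048576, 5242880) and the
 * zone's architectural limits are [0, 4194304), the overlap is
 * [1048576, 4194304), so the zone spans 3145728 pages on that node.
 */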
/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __meminit __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}
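/*
 * Example: for the range [1000, 9000) with memory present at
 * [0, 2000) and [5000, 6000), nr_absent starts at 8000 and the two
 * clamped intersections subtract 1000 pages each, leaving 6000
 * absent pages (the holes at [2000, 5000) and [6000, 9000)).
 */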
/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}
/* Return the number of page frames in holes in a zone on a node */
static unsigned long __meminit zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *ignored)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
	unsigned long zone_start_pfn, zone_end_pfn;

	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);

	adjust_zone_range_for_zone_movable(nid, zone_type,
			node_start_pfn, node_end_pfn,
			&zone_start_pfn, &zone_end_pfn);
	return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}
#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zones_size)
{
	return zones_size[zone_type];
}

static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
						unsigned long zone_type,
						unsigned long node_start_pfn,
						unsigned long node_end_pfn,
						unsigned long *zholes_size)
{
	if (!zholes_size)
		return 0;

	return zholes_size[zone_type];
}

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
						unsigned long node_start_pfn,
						unsigned long node_end_pfn,
						unsigned long *zones_size,
						unsigned long *zholes_size)
{
	unsigned long realtotalpages, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++)
		totalpages += zone_spanned_pages_in_node(pgdat->node_id, i,
							 node_start_pfn,
							 node_end_pfn,
							 zones_size);
	pgdat->node_spanned_pages = totalpages;

	realtotalpages = totalpages;
	for (i = 0; i < MAX_NR_ZONES; i++)
		realtotalpages -=
			zone_absent_pages_in_node(pgdat->node_id, i,
						  node_start_pfn, node_end_pfn,
						  zholes_size);
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
							realtotalpages);
}
#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
 * round what is now in bits to nearest long in bits, then return it in
 * bytes.
 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));

	return usemapsize / 8;
}
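/*
 * Worked example, assuming pageblock_order == 9 and NR_PAGEBLOCK_BITS
 * == 4: an aligned zone of 1048576 pages covers 2048 pageblocks,
 * needing 8192 bits; rounded to 64-bit longs that is 128 longs, so
 * usemap_size() returns 1024 bytes.
 */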
static void __init setup_usemap(struct pglist_data *pgdat,
				struct zone *zone,
				unsigned long zone_start_pfn,
				unsigned long zonesize)
{
	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);

	zone->pageblock_flags = NULL;
	if (usemapsize)
		zone->pageblock_flags =
			memblock_virt_alloc_node_nopanic(usemapsize,
							 pgdat->node_id);
}
#else
static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
				unsigned long zone_start_pfn, unsigned long zonesize) {}
#endif /* CONFIG_SPARSEMEM */
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __paginginit set_pageblock_order(void)
{
	unsigned int order;

	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	if (HPAGE_SHIFT > PAGE_SHIFT)
		order = HUGETLB_PAGE_ORDER;
	else
		order = MAX_ORDER - 1;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64 and
	 * powerpc.
	 */
	pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * is unused as pageblock_order is set at compile-time. See
 * include/linux/pageblock-flags.h for the values of pageblock_order based on
 * the kernel config
 */
void __paginginit set_pageblock_order(void)
{
}

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
						   unsigned long present_pages)
{
	unsigned long pages = spanned_pages;

	/*
	 * Provide a more accurate estimation if there are holes within
	 * the zone and SPARSEMEM is in use. If there are holes within the
	 * zone, each populated memory region may cost us one or two extra
	 * memmap pages due to alignment because the memmap pages for each
	 * populated region may not be naturally aligned on a page boundary.
	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
	 */
	if (spanned_pages > present_pages + (present_pages >> 4) &&
	    IS_ENABLED(CONFIG_SPARSEMEM))
		pages = present_pages;

	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}
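/*
 * Example, assuming a 64-byte struct page and 4KiB pages: a zone
 * spanning 1048576 PFNs with only 524288 present is sparse enough
 * (spanned > present + present/16) that the estimate uses
 * present_pages, giving 524288 * 64 / 4096 == 8192 memmap pages.
 */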
/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 *
 * NOTE: pgdat should get zeroed by caller.
 */
static void __paginginit free_area_init_core(struct pglist_data *pgdat,
		unsigned long node_start_pfn, unsigned long node_end_pfn,
		unsigned long *zones_size, unsigned long *zholes_size)
{
	enum zone_type j;
	int nid = pgdat->node_id;
	unsigned long zone_start_pfn = pgdat->node_start_pfn;
	int ret;

	pgdat_resize_init(pgdat);
#ifdef CONFIG_NUMA_BALANCING
	spin_lock_init(&pgdat->numabalancing_migrate_lock);
	pgdat->numabalancing_migrate_nr_pages = 0;
	pgdat->numabalancing_migrate_next_window = jiffies;
#endif
	init_waitqueue_head(&pgdat->kswapd_wait);
	init_waitqueue_head(&pgdat->pfmemalloc_wait);
	pgdat_page_cgroup_init(pgdat);

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, realsize, freesize, memmap_pages;

		size = zone_spanned_pages_in_node(nid, j, node_start_pfn,
						  node_end_pfn, zones_size);
		realsize = freesize = size - zone_absent_pages_in_node(nid, j,
								node_start_pfn,
								node_end_pfn,
								zholes_size);

		/*
		 * Adjust freesize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
		memmap_pages = calc_memmap_size(size, realsize);
		if (freesize >= memmap_pages) {
			freesize -= memmap_pages;
			if (memmap_pages)
				printk(KERN_DEBUG
				       "  %s zone: %lu pages used for memmap\n",
				       zone_names[j], memmap_pages);
		} else
			printk(KERN_WARNING
				"  %s zone: %lu pages exceeds freesize %lu\n",
				zone_names[j], memmap_pages, freesize);

		/* Account for reserved pages */
		if (j == 0 && freesize > dma_reserve) {
			freesize -= dma_reserve;
			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
					zone_names[0], dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += freesize;
		/* Charge for highmem memmap if there are enough kernel pages */
		else if (nr_kernel_pages > memmap_pages * 2)
			nr_kernel_pages -= memmap_pages;
		nr_all_pages += freesize;

		zone->spanned_pages = size;
		zone->present_pages = realsize;
		/*
		 * Set an approximate value for lowmem here, it will be adjusted
		 * when the bootmem allocator frees pages into the buddy system.
		 * And all highmem pages will be managed by the buddy system.
		 */
		zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
#ifdef CONFIG_NUMA
		zone->node = nid;
		zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
						/ 100;
		zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
#endif
		zone->name = zone_names[j];
		spin_lock_init(&zone->lock);
		spin_lock_init(&zone->lru_lock);
		zone_seqlock_init(zone);
		zone->zone_pgdat = pgdat;
		zone_pcp_init(zone);

		/* For bootup, initialized properly in watermark setup */
		mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);

		lruvec_init(&zone->lruvec);
		if (!size)
			continue;

		set_pageblock_order();
		setup_usemap(pgdat, zone, zone_start_pfn, size);
		ret = init_currently_empty_zone(zone, zone_start_pfn,
						size, MEMMAP_EARLY);
		BUG_ON(ret);
		memmap_init(size, nid, j, zone_start_pfn);
		zone_start_pfn += size;
	}
}
static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
{
	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

#ifdef CONFIG_FLAT_NODE_MEM_MAP
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, start, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
		start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
		end = pgdat_end_pfn(pgdat);
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size =  (end - start) * sizeof(struct page);
		map = alloc_remap(pgdat->node_id, size);
		if (!map)
			map = memblock_virt_alloc_node_nopanic(size,
							       pgdat->node_id);
		pgdat->node_mem_map = map + (pgdat->node_start_pfn - start);
	}
#ifndef CONFIG_NEED_MULTIPLE_NODES
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= (pgdat->node_start_pfn - ARCH_PFN_OFFSET);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
	}
#endif
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
}
void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
		unsigned long node_start_pfn, unsigned long *zholes_size)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = 0;
	unsigned long end_pfn = 0;

	/* pg_data_t should be reset to zero when it's allocated */
	WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);

	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	/* shift as u64 so a 32-bit PFN above 4GiB does not overflow */
	printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n", nid,
		(u64)start_pfn << PAGE_SHIFT, ((u64)end_pfn << PAGE_SHIFT) - 1);
#endif
	calculate_node_totalpages(pgdat, start_pfn, end_pfn,
				  zones_size, zholes_size);

	alloc_node_mem_map(pgdat);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
		nid, (unsigned long)pgdat,
		(unsigned long)pgdat->node_mem_map);
#endif

	free_area_init_core(pgdat, start_pfn, end_pfn,
			    zones_size, zholes_size);
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP

#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
void __init setup_nr_node_ids(void)
{
	unsigned int node;
	unsigned int highest = 0;

	for_each_node_mask(node, node_possible_map)
		highest = node;
	nr_node_ids = highest + 1;
}
#endif
/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * This function should be called after node map is populated and sorted.
 * It calculates the maximum power of two alignment which can distinguish
 * all the nodes.
 *
 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
 * nodes are shifted by 256MiB, 256MiB is returned instead. Note that if
 * only the last node is shifted, 1GiB is enough and this function will
 * indicate so.
 *
 * This is used to test whether pfn -> nid mapping of the chosen memory
 * model has fine enough granularity to avoid incorrect mapping for the
 * populated node map.
 *
 * Returns the determined alignment in PFNs. 0 if there is no alignment
 * requirement (single node).
 */
unsigned long __init node_map_pfn_alignment(void)
{
	unsigned long accl_mask = 0, last_end = 0;
	unsigned long start, end, mask;
	int last_nid = -1;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
		if (!start || last_nid < 0 || last_nid == nid) {
			last_nid = nid;
			last_end = end;
			continue;
		}

		/*
		 * Start with a mask granular enough to pin-point to the
		 * start pfn and tick off bits one-by-one until it becomes
		 * too coarse to separate the current node from the last.
		 */
		mask = ~((1 << __ffs(start)) - 1);
		while (mask && last_end <= (start & (mask << 1)))
			mask <<= 1;

		/* accumulate all internode masks */
		accl_mask |= mask;
	}

	/* convert mask to number of pages */
	return ~accl_mask + 1;
}
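/*
 * Trace of the loop above for two nodes, one ending at pfn 0x40000
 * (1GiB of 4KiB pages) and the next starting at pfn 0x50000: mask
 * starts at ~0xffff and widens while last_end still fits below
 * start & (mask << 1), stopping at ~0x3ffff, so ~accl_mask + 1 ==
 * 0x40000 pfns, i.e. 1GiB alignment distinguishes the two nodes.
 */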
/* Find the lowest pfn for a node */
static unsigned long __init find_min_pfn_for_node(int nid)
{
	unsigned long min_pfn = ULONG_MAX;
	unsigned long start_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
		min_pfn = min(min_pfn, start_pfn);

	if (min_pfn == ULONG_MAX) {
		printk(KERN_WARNING
			"Could not find start_pfn for node %d\n", nid);
		return 0;
	}

	return min_pfn;
}

/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * memblock_set_node().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
{
	return find_min_pfn_for_node(MAX_NUMNODES);
}
/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);
	}
	return totalpages;
}
/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrowing the nodemask */
	nodemask_t saved_node_state = node_states[N_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
	struct memblock_region *r;

	/* Need to find movable_zone earlier when movable_node is specified. */
	find_usable_zone_for_movable();

	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
	if (movable_node_is_enabled()) {
		for_each_memblock(memory, r) {
			if (!memblock_is_hotpluggable(r))
				continue;

			nid = r->nid;

			usable_startpfn = PFN_DOWN(r->base);
			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		goto out2;
	}

	/*
	 * If movablecore=nn[KMG] was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/* If kernelcore was not specified, there is no ZONE_MOVABLE */
	if (!required_kernelcore)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			unsigned long size_pages;

			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;

				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {
					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

out2:
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

out:
	/* restore the node_state */
	node_states[N_MEMORY] = saved_node_state;
}
/* Any regular or high memory on that node? */
static void check_for_memory(pg_data_t *pgdat, int nid)
{
	enum zone_type zone_type;

	if (N_MEMORY == N_NORMAL_MEMORY)
		return;

	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		if (populated_zone(zone)) {
			node_set_state(nid, N_HIGH_MEMORY);
			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
			    zone_type <= ZONE_NORMAL)
				node_set_state(nid, N_NORMAL_MEMORY);
			break;
		}
	}
}
/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));
	arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
	arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
	for (i = 1; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		arch_zone_lowest_possible_pfn[i] =
			arch_zone_highest_possible_pfn[i-1];
		arch_zone_highest_possible_pfn[i] =
			max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
	}
	arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
	arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;

	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
	find_zone_movable_pfns_for_nodes();

	/* Print out the zone ranges */
	printk("Zone ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		printk(KERN_CONT "  %-8s ", zone_names[i]);
		if (arch_zone_lowest_possible_pfn[i] ==
				arch_zone_highest_possible_pfn[i])
			printk(KERN_CONT "empty\n");
		else
			printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
				arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
				(arch_zone_highest_possible_pfn[i]
					<< PAGE_SHIFT) - 1);
	}

	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
	printk("Movable zone start for each node\n");
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (zone_movable_pfn[i])
			printk("  Node %d: %#010lx\n", i,
			       zone_movable_pfn[i] << PAGE_SHIFT);
	}

	/* Print out the early node map */
	printk("Early memory node ranges\n");
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
		printk("  node %3d: [mem %#010lx-%#010lx]\n", nid,
		       start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);

	/* Initialise every node */
	mminit_verify_pageflags_layout();
	setup_nr_node_ids();
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);

		free_area_init_node(nid, NULL,
				find_min_pfn_for_node(nid), NULL);

		/* Any memory on that node */
		if (pgdat->node_present_pages)
			node_set_state(nid, N_MEMORY);
		check_for_memory(pgdat, nid);
	}
}
static int __init cmdline_parse_core(char *p, unsigned long *core)
{
	unsigned long long coremem;

	if (!p)
		return -EINVAL;
	coremem = memparse(p, &p);
	*core = coremem >> PAGE_SHIFT;

	/* Paranoid check that UL is enough for the coremem value */
	WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

	return 0;
}
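/*
 * Example, assuming 4KiB pages: "kernelcore=512M" is parsed by
 * memparse() into 536870912 bytes, which becomes 131072 pages in *core.
 */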
/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	return cmdline_parse_core(p, &required_kernelcore);
}

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore);
}

early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
void adjust_managed_page_count(struct page *page, long count)
{
	spin_lock(&managed_page_count_lock);
	page_zone(page)->managed_pages += count;
	totalram_pages += count;
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages += count;
#endif
	spin_unlock(&managed_page_count_lock);
}
EXPORT_SYMBOL(adjust_managed_page_count);
unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
{
	void *pos;
	unsigned long pages = 0;

	start = (void *)PAGE_ALIGN((unsigned long)start);
	end = (void *)((unsigned long)end & PAGE_MASK);
	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
		if ((unsigned int)poison <= 0xFF)
			memset(pos, poison, PAGE_SIZE);
		free_reserved_page(virt_to_page(pos));
	}

	if (pages && s)
		pr_info("Freeing %s memory: %ldK (%p - %p)\n",
			s, pages << (PAGE_SHIFT - 10), start, end);

	return pages;
}
EXPORT_SYMBOL(free_reserved_area);
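/*
 * Typical use (illustrative, mirroring free_initmem_default()): release
 * the init sections back to the page allocator once boot is done, e.g.
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 */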
#ifdef CONFIG_HIGHMEM
void free_highmem_page(struct page *page)
{
	__free_reserved_page(page);
	totalram_pages++;
	page_zone(page)->managed_pages++;
	totalhigh_pages++;
}
#endif
void __init mem_init_print_info(const char *str)
{
	unsigned long physpages, codesize, datasize, rosize, bss_size;
	unsigned long init_code_size, init_data_size;

	physpages = get_num_physpages();
	codesize = _etext - _stext;
	datasize = _edata - _sdata;
	rosize = __end_rodata - __start_rodata;
	bss_size = __bss_stop - __bss_start;
	init_data_size = __init_end - __init_begin;
	init_code_size = _einittext - _sinittext;

	/*
	 * Detect special cases and adjust section sizes accordingly:
	 * 1) .init.* may be embedded into .data sections
	 * 2) .init.text.* may be out of [__init_begin, __init_end],
	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
	 * 3) .rodata.* may be embedded into .text or .data sections.
	 */
#define adj_init_size(start, end, size, pos, adj) \
	do { \
		if (start <= pos && pos < end && size > adj) \
			size -= adj; \
	} while (0)

	adj_init_size(__init_begin, __init_end, init_data_size,
		     _sinittext, init_code_size);
	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);

#undef	adj_init_size

	printk("Memory: %luK/%luK available "
	       "(%luK kernel code, %luK rwdata, %luK rodata, "
	       "%luK init, %luK bss, %luK reserved"
#ifdef CONFIG_HIGHMEM
	       ", %luK highmem"
#endif
	       "%s%s)\n",
	       nr_free_pages() << (PAGE_SHIFT-10), physpages << (PAGE_SHIFT-10),
	       codesize >> 10, datasize >> 10, rosize >> 10,
	       (init_data_size + init_code_size) >> 10, bss_size >> 10,
	       (physpages - totalram_pages) << (PAGE_SHIFT-10),
#ifdef CONFIG_HIGHMEM
	       totalhigh_pages << (PAGE_SHIFT-10),
#endif
	       str ? ", " : "", str ? str : "");
}
/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by present_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}
void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
}
static int page_alloc_cpu_notify(struct notifier_block *self,
				 unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
		lru_add_drain_cpu(cpu);
		drain_pages(cpu);

		/*
		 * Spill the event counters of the dead processor
		 * into the current processors event counters.
		 * This artificially elevates the count of the current
		 * processor.
		 */
		vm_events_fold_cpu(cpu);

		/*
		 * Zero the differential counters of the dead processor
		 * so that the vm statistics are consistent.
		 *
		 * This is only okay since the processor is dead and cannot
		 * race with what we are doing.
		 */
		cpu_vm_stats_fold(cpu);
	}
	return NOTIFY_OK;
}

void __init page_alloc_init(void)
{
	hotcpu_notifier(page_alloc_cpu_notify, 0);
}
/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			long max = 0;

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > zone->managed_pages)
				max = zone->managed_pages;
			reserve_pages += max;
			/*
			 * Lowmem reserves are not available to
			 * GFP_HIGHUSER page cache allocations and
			 * kswapd tries to balance zones to their high
			 * watermark.  As a result, neither should be
			 * regarded as dirtyable memory, to prevent a
			 * situation where reclaim has to clean pages
			 * in order to balance the zones.
			 */
			zone->dirty_balance_reserve = max;
		}
	}
	dirty_balance_reserve = reserve_pages;
	totalreserve_pages = reserve_pages;
}
/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type j, idx;

	for_each_online_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long managed_pages = zone->managed_pages;

			zone->lowmem_reserve[j] = 0;

			idx = j;
			while (idx) {
				struct zone *lower_zone;

				idx--;

				if (sysctl_lowmem_reserve_ratio[idx] < 1)
					sysctl_lowmem_reserve_ratio[idx] = 1;

				lower_zone = pgdat->node_zones + idx;
				lower_zone->lowmem_reserve[j] = managed_pages /
					sysctl_lowmem_reserve_ratio[idx];
				managed_pages += lower_zone->managed_pages;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
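/*
 * Example, assuming sysctl_lowmem_reserve_ratio[ZONE_DMA32] == 256:
 * a 4GiB ZONE_NORMAL (1048576 managed pages) sitting above ZONE_DMA32
 * makes the lower zone reserve 1048576 / 256 == 4096 pages against
 * allocations that could have been satisfied from ZONE_NORMAL.
 */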
static void __setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->managed_pages;
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone->managed_pages;
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
			 * deltas control asynchronous page reclaim, and so
			 * should not be capped for highmem.
			 */
			unsigned long min_pages;

			min_pages = zone->managed_pages / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->watermark[WMARK_MIN] = tmp;
		}

		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + (tmp >> 2);
		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);

		__mod_zone_page_state(zone, NR_ALLOC_BATCH,
			high_wmark_pages(zone) - low_wmark_pages(zone) -
			atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));

		setup_zone_migrate_reserve(zone);
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
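/*
 * Worked example, assuming min_free_kbytes == 4096 and 4KiB pages:
 * pages_min == 1024, and a single lowmem zone holding all managed
 * pages gets tmp == 1024, so WMARK_MIN == 1024, WMARK_LOW == 1024 +
 * 256 == 1280 and WMARK_HIGH == 1024 + 512 == 1536 pages.
 */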
/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	mutex_lock(&zonelists_mutex);
	__setup_per_zone_wmarks();
	mutex_unlock(&zonelists_mutex);
}
/*
 * The inactive anon list should be small enough that the VM never has to
 * do too much work, but large enough that each inactive page has a chance
 * to be referenced again before it is swapped out.
 *
 * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
 * INACTIVE_ANON pages on this zone's LRU, maintained by the
 * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
 * the anonymous pages are kept on the inactive list.
 *
 * total     target    max
 * memory    ratio     inactive anon
 * -------------------------------------
 *   10MB       1         5MB
 *  100MB       1        50MB
 *    1GB       3       250MB
 *   10GB      10       0.9GB
 *  100GB      31         3GB
 *    1TB     101        10GB
 *   10TB     320        32GB
 */
static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
{
	unsigned int gb, ratio;

	/* Zone size in gigabytes */
	gb = zone->managed_pages >> (30 - PAGE_SHIFT);
	if (gb)
		ratio = int_sqrt(10 * gb);
	else
		ratio = 1;

	zone->inactive_ratio = ratio;
}
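/*
 * Checking the table above against the formula: a 1GiB zone has
 * gb == 1, so ratio == int_sqrt(10) == 3; a 100GiB zone has
 * gb == 100, so ratio == int_sqrt(1000) == 31.
 */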
static void __meminit setup_per_zone_inactive_ratio(void)
{
	struct zone *zone;

	for_each_zone(zone)
		calculate_zone_inactive_ratio(zone);
}
/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (64MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
int __meminit init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes) {
		min_free_kbytes = new_min_free_kbytes;
		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 65536)
			min_free_kbytes = 65536;
	} else {
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
				new_min_free_kbytes, user_min_free_kbytes);
	}
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();
	setup_per_zone_inactive_ratio();
	return 0;
}
module_init(init_per_zone_wmark_min)
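/*
 * Spot-checking the table above: with 4GiB of lowmem, lowmem_kbytes ==
 * 4194304 and int_sqrt(4194304 * 16) == int_sqrt(67108864) == 8192,
 * matching the "4096MB: 8192k" row (well under the 65536k cap).
 */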
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around
 *	proc_dointvec_minmax() so that we can call two helper functions
 *	whenever min_free_kbytes changes.
 */
int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}
#ifdef CONFIG_NUMA
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->managed_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	for_each_zone(zone)
		zone->min_slab_pages = (zone->managed_pages *
				sysctl_min_slab_ratio) / 100;
	return 0;
}
#endif
/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec_minmax() so that we can call
 *	setup_per_zone_lowmem_reserve() whenever
 *	sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio has no relation to the minimum watermarks; it is
 * only meaningful as a function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}
/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.  It is the fraction of total pages in each zone that a hot per cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int old_percpu_pagelist_fraction;
	int ret;

	mutex_lock(&pcp_batch_high_lock);
	old_percpu_pagelist_fraction = percpu_pagelist_fraction;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || ret < 0)
		goto out;

	/* Sanity checking to avoid pcp imbalance */
	if (percpu_pagelist_fraction &&
	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
		ret = -EINVAL;
		goto out;
	}

	/* No change? */
	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
		goto out;

	for_each_populated_zone(zone) {
		unsigned int cpu;

		for_each_possible_cpu(cpu)
			pageset_set_high_and_batch(zone,
					per_cpu_ptr(zone->pageset, cpu));
	}
out:
	mutex_unlock(&pcp_batch_high_lock);
	return ret;
}
int hashdist = HASHDIST_DEFAULT;

#ifdef CONFIG_NUMA
static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif

/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
                                     unsigned long bucketsize,
                                     unsigned long numentries,
                                     int scale,
                                     int flags,
                                     unsigned int *_hash_shift,
                                     unsigned int *_hash_mask,
                                     unsigned long low_limit,
                                     unsigned long high_limit)
{
        unsigned long long max = high_limit;
        unsigned long log2qty, size;
        void *table = NULL;

        /* allow the kernel cmdline to have a say */
        if (!numentries) {
                /* round applicable memory size up to nearest megabyte */
                numentries = nr_kernel_pages;

                /* It isn't necessary when PAGE_SIZE >= 1MB */
                if (PAGE_SHIFT < 20)
                        numentries = round_up(numentries, (1<<20)/PAGE_SIZE);

                /* limit to 1 bucket per 2^scale bytes of low memory */
                if (scale > PAGE_SHIFT)
                        numentries >>= (scale - PAGE_SHIFT);
                else
                        numentries <<= (PAGE_SHIFT - scale);

                /* Make sure we've got at least a 0-order allocation.. */
                if (unlikely(flags & HASH_SMALL)) {
                        /* Makes no sense without HASH_EARLY */
                        WARN_ON(!(flags & HASH_EARLY));
                        if (!(numentries >> *_hash_shift)) {
                                numentries = 1UL << *_hash_shift;
                                BUG_ON(!numentries);
                        }
                } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
                        numentries = PAGE_SIZE / bucketsize;
        }
        numentries = roundup_pow_of_two(numentries);

        /* limit allocation size to 1/16 total memory by default */
        if (max == 0) {
                max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
                do_div(max, bucketsize);
        }
        max = min(max, 0x80000000ULL);

        if (numentries < low_limit)
                numentries = low_limit;
        if (numentries > max)
                numentries = max;

        log2qty = ilog2(numentries);

        do {
                size = bucketsize << log2qty;
                if (flags & HASH_EARLY)
                        table = memblock_virt_alloc_nopanic(size, 0);
                else if (hashdist)
                        table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
                else {
                        /*
                         * If bucketsize is not a power-of-two, we may free
                         * some pages at the end of hash table which
                         * alloc_pages_exact() automatically does
                         */
                        if (get_order(size) < MAX_ORDER) {
                                table = alloc_pages_exact(size, GFP_ATOMIC);
                                kmemleak_alloc(table, size, 1, GFP_ATOMIC);
                        }
                }
        } while (!table && size > PAGE_SIZE && --log2qty);

        if (!table)
                panic("Failed to allocate %s hash table\n", tablename);

        printk(KERN_INFO "%s hash table entries: %ld (order: %d, %lu bytes)\n",
               tablename,
               (1UL << log2qty),
               ilog2(size) - PAGE_SHIFT,
               size);

        if (_hash_shift)
                *_hash_shift = log2qty;
        if (_hash_mask)
                *_hash_mask = (1 << log2qty) - 1;

        return table;
}
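
/*
 * A sketch of a typical caller, modeled on how fs/dcache.c sizes the
 * dentry hash table at boot.  dhash_entries comes from the "dhash_entries="
 * command-line option and may be 0, letting the code above auto-size the
 * table; scale == 13 asks for one bucket per 2^13 bytes of low memory:
 *
 *        dentry_hashtable =
 *                alloc_large_system_hash("Dentry cache",
 *                                        sizeof(struct hlist_bl_head),
 *                                        dhash_entries,
 *                                        13,
 *                                        HASH_EARLY,
 *                                        &d_hash_shift,
 *                                        &d_hash_mask,
 *                                        0,
 *                                        0);
 */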

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
                                        unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
        return __pfn_to_section(pfn)->pageblock_flags;
#else
        return zone->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
        pfn &= (PAGES_PER_SECTION-1);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
        pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
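
/*
 * Worked example for the SPARSEMEM case, assuming common x86_64 values
 * (PAGES_PER_SECTION == 32768, pageblock_order == 9, NR_PAGEBLOCK_BITS == 4):
 *
 *        pfn = 0x12801
 *        pfn &= (PAGES_PER_SECTION - 1)     -> 0x2801 (offset in section)
 *        0x2801 >> pageblock_order          -> 20     (20th pageblock)
 *        20 * NR_PAGEBLOCK_BITS             -> 80     (bit index in bitmap)
 *
 * i.e. the four flag bits for this pageblock start at bit 80 of the
 * section's pageblock_flags bitmap.
 */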

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
                                        unsigned long end_bitidx,
                                        unsigned long mask)
{
        struct zone *zone;
        unsigned long *bitmap;
        unsigned long bitidx, word_bitidx;
        unsigned long word;

        zone = page_zone(page);
        bitmap = get_pageblock_bitmap(zone, pfn);
        bitidx = pfn_to_bitidx(zone, pfn);
        word_bitidx = bitidx / BITS_PER_LONG;
        bitidx &= (BITS_PER_LONG-1);

        word = bitmap[word_bitidx];
        bitidx += end_bitidx;
        return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}
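
/*
 * The familiar get_pageblock_migratetype() is a thin wrapper around this
 * helper; a sketch of the wrapper (see include/linux/mmzone.h for the
 * real definition):
 *
 *        #define get_pageblock_migratetype(page)                         \
 *                get_pfnblock_flags_mask(page, page_to_pfn(page),        \
 *                                        PB_migrate_end, MIGRATETYPE_MASK)
 */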

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
                                        unsigned long pfn,
                                        unsigned long end_bitidx,
                                        unsigned long mask)
{
        struct zone *zone;
        unsigned long *bitmap;
        unsigned long bitidx, word_bitidx;
        unsigned long old_word, word;

        BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

        zone = page_zone(page);
        bitmap = get_pageblock_bitmap(zone, pfn);
        bitidx = pfn_to_bitidx(zone, pfn);
        word_bitidx = bitidx / BITS_PER_LONG;
        bitidx &= (BITS_PER_LONG-1);

        VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);

        bitidx += end_bitidx;
        mask <<= (BITS_PER_LONG - bitidx - 1);
        flags <<= (BITS_PER_LONG - bitidx - 1);

        word = ACCESS_ONCE(bitmap[word_bitidx]);
        for (;;) {
                old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
                if (word == old_word)
                        break;
                word = old_word;
        }
}
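
/*
 * The loop above is the usual lock-free read-modify-write idiom: read the
 * word once, then let cmpxchg() detect concurrent writers and retry with
 * the value it observed.  A minimal sketch of the same pattern in
 * isolation (set_bits_atomic is a hypothetical helper, not part of this
 * file):
 *
 *        static void set_bits_atomic(unsigned long *word_ptr,
 *                                    unsigned long mask, unsigned long bits)
 *        {
 *                unsigned long old, word = ACCESS_ONCE(*word_ptr);
 *
 *                for (;;) {
 *                        old = cmpxchg(word_ptr, word, (word & ~mask) | bits);
 *                        if (old == word)
 *                                break;
 *                        word = old;
 *                }
 *        }
 */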

/*
 * This function checks whether the pageblock includes unmovable pages or not.
 * If @count is not zero, it is okay for the block to include fewer than
 * @count unmovable pages.
 *
 * A PageLRU check without isolation or the lru_lock can race, so a
 * MIGRATE_MOVABLE block might include unmovable pages.  This function
 * therefore cannot be expected to be exact.
 */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
                         bool skip_hwpoisoned_pages)
{
        unsigned long pfn, iter, found;
        int mt;

        /*
         * The caller should have run lru_add_drain_all() first to reduce
         * noise.  A ZONE_MOVABLE zone never contains unmovable pages.
         */
        if (zone_idx(zone) == ZONE_MOVABLE)
                return false;
        mt = get_pageblock_migratetype(page);
        if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
                return false;

        pfn = page_to_pfn(page);
        for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
                unsigned long check = pfn + iter;

                if (!pfn_valid_within(check))
                        continue;

                page = pfn_to_page(check);

                /*
                 * Hugepages are not on LRU lists, but they're movable.
                 * We need not scan over tail pages because we don't
                 * handle each tail page individually in migration.
                 */
                if (PageHuge(page)) {
                        iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
                        continue;
                }

                /*
                 * We can't use page_count() without pinning the page
                 * because another CPU can free the compound page.
                 * This check already skips compound tails of THP
                 * because their page->_count is zero at all times.
                 */
                if (!atomic_read(&page->_count)) {
                        if (PageBuddy(page))
                                iter += (1 << page_order(page)) - 1;
                        continue;
                }

                /*
                 * A HWPoisoned page may not be in the buddy system, and
                 * its page_count() is not 0.
                 */
                if (skip_hwpoisoned_pages && PageHWPoison(page))
                        continue;

                if (!PageLRU(page))
                        found++;
                /*
                 * If there are RECLAIMABLE pages, we need to check them too.
                 * But memory offlining itself doesn't call shrink_slab()
                 * yet, so this still needs to be fixed.
                 */
                /*
                 * If the page is not RAM, page_count() should be 0.
                 * No further check is needed: this is an in-use,
                 * non-movable page.
                 *
                 * The problematic thing here is PG_reserved pages.
                 * PG_reserved is set on both memory-hole pages and
                 * in-use kernel pages at boot.
                 */
                if (found > count)
                        return true;
        }
        return false;
}

bool is_pageblock_removable_nolock(struct page *page)
{
        struct zone *zone;
        unsigned long pfn;

        /*
         * We have to be careful here because we are iterating over memory
         * sections which are not zone aware, so we might end up outside of
         * the zone but still within the section.
         * We also have to take the node into account: if the node is
         * offline its NODE_DATA will be NULL - see page_zone().
         */
        if (!node_online(page_to_nid(page)))
                return false;

        zone = page_zone(page);
        pfn = page_to_pfn(page);
        if (!zone_spans_pfn(zone, pfn))
                return false;

        return !has_unmovable_pages(zone, page, 0, true);
}

#ifdef CONFIG_CMA

static unsigned long pfn_max_align_down(unsigned long pfn)
{
        return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
                             pageblock_nr_pages) - 1);
}

static unsigned long pfn_max_align_up(unsigned long pfn)
{
        return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
                                pageblock_nr_pages));
}
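
/*
 * Worked example, assuming common x86_64 values (MAX_ORDER_NR_PAGES == 1024,
 * pageblock_nr_pages == 512), so the alignment used is 1024 pages:
 *
 *        pfn_max_align_down(1500) == 1024
 *        pfn_max_align_up(1500)   == 2048
 *
 * alloc_contig_range() below isolates this wider, aligned window even when
 * the caller asked only for a sub-range of it.
 */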

/* [start, end) must belong to a single zone. */
static int __alloc_contig_migrate_range(struct compact_control *cc,
                                        unsigned long start, unsigned long end)
{
        /* This function is based on compact_zone() from compaction.c. */
        unsigned long nr_reclaimed;
        unsigned long pfn = start;
        unsigned int tries = 0;
        int ret = 0;

        migrate_prep();

        while (pfn < end || !list_empty(&cc->migratepages)) {
                if (fatal_signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }

                if (list_empty(&cc->migratepages)) {
                        cc->nr_migratepages = 0;
                        pfn = isolate_migratepages_range(cc, pfn, end);
                        if (!pfn) {
                                ret = -EINTR;
                                break;
                        }
                        tries = 0;
                } else if (++tries == 5) {
                        ret = ret < 0 ? ret : -EBUSY;
                        break;
                }

                nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
                                                        &cc->migratepages);
                cc->nr_migratepages -= nr_reclaimed;

                ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
                                    NULL, 0, cc->mode, MR_CMA);
        }
        if (ret < 0) {
                putback_movable_pages(&cc->migratepages);
                return ret;
        }
        return 0;
}

/**
 * alloc_contig_range() -- tries to allocate the given range of pages
 * @start:      start PFN to allocate
 * @end:        one-past-the-last PFN to allocate
 * @migratetype:        migratetype of the underlying pageblocks (either
 *                      #MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *                      in range must have the same migratetype and it must
 *                      be either of the two.
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned.  However, it is the caller's responsibility to guarantee that
 * we are the only thread that changes the migrate type of pageblocks the
 * pages fall in.
 *
 * The PFN range must belong to a single zone.
 *
 * Returns zero on success or a negative error code.  On success, all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
                       unsigned migratetype)
{
        unsigned long outer_start, outer_end;
        int ret = 0, order;

        struct compact_control cc = {
                .nr_migratepages = 0,
                .order = -1,
                .zone = page_zone(pfn_to_page(start)),
                .mode = MIGRATE_SYNC,
                .ignore_skip_hint = true,
        };
        INIT_LIST_HEAD(&cc.migratepages);

        /*
         * What we do here is we mark all pageblocks in range as
         * MIGRATE_ISOLATE.  Because pageblock and max order pages may
         * have different sizes, and due to the way the page allocator
         * works, we align the range to the bigger of the two so that
         * the page allocator won't try to merge buddies from
         * different pageblocks and change MIGRATE_ISOLATE to some
         * other migration type.
         *
         * Once the pageblocks are marked as MIGRATE_ISOLATE, we
         * migrate the pages from an unaligned range (ie. the pages that
         * we are interested in).  This will put all the pages in the
         * range back into the page allocator as MIGRATE_ISOLATE.
         *
         * When this is done, we take the pages in range from the page
         * allocator, removing them from the buddy system.  This way
         * the page allocator will never consider using them.
         *
         * This lets us mark the pageblocks back as
         * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
         * aligned range but not in the unaligned, original range are
         * put back to the page allocator so that buddy can use them.
         */

        ret = start_isolate_page_range(pfn_max_align_down(start),
                                       pfn_max_align_up(end), migratetype,
                                       false);
        if (ret)
                return ret;

        ret = __alloc_contig_migrate_range(&cc, start, end);
        if (ret)
                goto done;

        /*
         * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
         * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
         * more, all pages in [start, end) are free in the page allocator.
         * What we are going to do is to allocate all pages from
         * [start, end) (that is, remove them from the page allocator).
         *
         * The only problem is that pages at the beginning and at the
         * end of the interesting range may not be aligned with pages
         * that the page allocator holds, ie. they can be part of higher
         * order pages.  Because of this, we reserve the bigger range and
         * once this is done free the pages we are not interested in.
         *
         * We don't have to hold zone->lock here because the pages are
         * isolated thus they won't get removed from buddy.
         */

        lru_add_drain_all();
        drain_all_pages();

        order = 0;
        outer_start = start;
        while (!PageBuddy(pfn_to_page(outer_start))) {
                if (++order >= MAX_ORDER) {
                        ret = -EBUSY;
                        goto done;
                }
                outer_start &= ~0UL << order;
        }

        /* Make sure the range is really isolated. */
        if (test_pages_isolated(outer_start, end, false)) {
                pr_info("%s: [%lx, %lx) PFNs busy\n",
                        __func__, outer_start, end);
                ret = -EBUSY;
                goto done;
        }

        /* Grab isolated pages from freelists. */
        outer_end = isolate_freepages_range(&cc, outer_start, end);
        if (!outer_end) {
                ret = -EBUSY;
                goto done;
        }

        /* Free head and tail (if any) */
        if (start != outer_start)
                free_contig_range(outer_start, start - outer_start);
        if (end != outer_end)
                free_contig_range(end, outer_end - end);

done:
        undo_isolate_page_range(pfn_max_align_down(start),
                                pfn_max_align_up(end), migratetype);
        return ret;
}

void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
        unsigned int count = 0;

        for (; nr_pages--; pfn++) {
                struct page *page = pfn_to_page(pfn);

                count += page_count(page) != 1;
                __free_page(page);
        }
        WARN(count != 0, "%d pages are still in use!\n", count);
}
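
/*
 * A sketch of a typical caller pairing the two functions above, modeled on
 * the CMA allocator (names simplified; real callers retry on -EBUSY and
 * serialize against each other):
 *
 *        ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
 *        if (ret == 0) {
 *                struct page *page = pfn_to_page(pfn);
 *                ... use the physically contiguous region ...
 *                free_contig_range(pfn, count);
 *        }
 */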
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
        unsigned cpu;

        mutex_lock(&pcp_batch_high_lock);
        for_each_possible_cpu(cpu)
                pageset_set_high_and_batch(zone,
                                per_cpu_ptr(zone->pageset, cpu));
        mutex_unlock(&pcp_batch_high_lock);
}
#endif

void zone_pcp_reset(struct zone *zone)
{
        unsigned long flags;
        int cpu;
        struct per_cpu_pageset *pset;

        /* avoid races with drain_pages() */
        local_irq_save(flags);
        if (zone->pageset != &boot_pageset) {
                for_each_online_cpu(cpu) {
                        pset = per_cpu_ptr(zone->pageset, cpu);
                        drain_zonestat(zone, pset);
                }
                free_percpu(zone->pageset);
                zone->pageset = &boot_pageset;
        }
        local_irq_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be isolated before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *page;
        struct zone *zone;
        unsigned int order, i;
        unsigned long pfn;
        unsigned long flags;

        /* find the first valid pfn */
        for (pfn = start_pfn; pfn < end_pfn; pfn++)
                if (pfn_valid(pfn))
                        break;
        if (pfn == end_pfn)
                return;
        zone = page_zone(pfn_to_page(pfn));
        spin_lock_irqsave(&zone->lock, flags);
        pfn = start_pfn;
        while (pfn < end_pfn) {
                if (!pfn_valid(pfn)) {
                        pfn++;
                        continue;
                }
                page = pfn_to_page(pfn);
                /*
                 * A HWPoisoned page may not be in the buddy system, and
                 * its page_count() is not 0.
                 */
                if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
                        pfn++;
                        SetPageReserved(page);
                        continue;
                }

                BUG_ON(page_count(page));
                BUG_ON(!PageBuddy(page));
                order = page_order(page);
#ifdef CONFIG_DEBUG_VM
                printk(KERN_INFO "remove from free list %lx %d %lx\n",
                       pfn, 1 << order, end_pfn);
#endif
                list_del(&page->lru);
                rmv_page_order(page);
                zone->free_area[order].nr_free--;
                for (i = 0; i < (1 << order); i++)
                        SetPageReserved((page+i));
                pfn += (1 << order);
        }
        spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

#ifdef CONFIG_MEMORY_FAILURE
bool is_free_buddy_page(struct page *page)
{
        struct zone *zone = page_zone(page);
        unsigned long pfn = page_to_pfn(page);
        unsigned long flags;
        unsigned int order;

        spin_lock_irqsave(&zone->lock, flags);
        for (order = 0; order < MAX_ORDER; order++) {
                struct page *page_head = page - (pfn & ((1 << order) - 1));

                if (PageBuddy(page_head) && page_order(page_head) >= order)
                        break;
        }
        spin_unlock_irqrestore(&zone->lock, flags);

        return order < MAX_ORDER;
}
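
/*
 * Worked example of the page_head computation above: for pfn == 0x12345
 * and order == 3, pfn & ((1 << 3) - 1) == 5, so page_head == page - 5,
 * i.e. the order-3-aligned pfn 0x12340.  If that page is PageBuddy with
 * page_order() >= 3, the original page lies inside a free buddy block and
 * the loop breaks with order < MAX_ORDER.
 */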
#endif