page_alloc.c 197 KB

1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642164316441645164616471648164916501651165216531654165516561657165816591660166116621663166416651666166716681669167016711672167316741675167616771678167916801681168216831684168516861687168816891690169116921693169416951696169716981699170017011702170317041705170617071708170917101711171217131714171517161717171817191720172117221723172417251726172717281729173017311732173317341735173617371738173917401741174217431744174517461747174817491750175117521753175417551756175717581759176017611762176317641765176617671768176917701771177217731774177517761777177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277727782779278027812782278327842785278627872788278927902791279227932794279527962797279827992800280128022803280428052806280728082809281028112812281328142815281628172818281928202821282228232824282528262827282828292830283128322833283428352836283728382839284028412842284328442845284628472848284928502851285228532854285528562857285828592860286128622863286428652866286728682869287028712872287328742875287628772878287928802881288228832884288528862887288828892890289128922893289428952896289728982899290029012902290329042905290629072908290929102911291229132914291529162917291829192920292129222923292429252926292729282929293029312932293329342935293629372938293929402941294229432944294529462947294829492950295129522953295429552956295729582959296029612962296329642965296629672968296929702971297229732974297529762977297829792980298129822983298429852986298729882989299029912992299329942995299629972998299930003001300230033004300530063007300830093010301130123013301430153016301730183019302030213022302330243025302630273028302930303031303230333034303530363037303830393040304130423043304430453046304730483049305030513052305330543055305630573058305930603061306230633064306530663067306830693070307130723073307430753076307730783079308030813082308330843085308630873088308930903091309230933094309530963097309830993100310131023103310431053106310731083109311031113112311331143115311631173118311931203121312231233124312531263127312831293130313131323133313431353136313731383139314031413142314331443145314631473148314931503151315231533154315531563157315831593160316131623163316431653166316731683169317031713172317331743175317631773178317931803181318231833184318531863187318831893190319131923193319431953196319731983199320032013202320332043205320632073208320932103211321232133214321532163217321832193220322132223223322432253226322732283229323032313232323332343235323632373238323932403241324232433244324532463247324832493250325132523253325432553256325732583259326032613262326332643265326632673268326932703271327232733274327532763277327832793280328132823283328432853286328732883289329032913292329332943295329632973298329933003301330233033304330533063307330833093310331133123313331433153316331733183319332033213322332333243325332633273328332933303331333233333334333533363337333833393340334133423343334433453346334733483349335033513352335333543355335633573358335933603361336233633364336533663367336833693370337133723373337433753376337733783379338033813382338333843385338633873388338933903391339233933394339533963397339833993400340134023403340434053406340734083409341034113412341334143415341634173418341934203421342234233424342534263427342834293430343134323433343434353436343734383439344034413442344334443445344634473448344934503451345234533454345534563457345834593460346134623463346434653466346734683469347034713472347334743475347634773478347934803481348234833484348534863487348834893490349134923493349434953496349734983499350035013502350335043505350635073508350935103511351235133514351535163517351835193520352135223523352435253526352735283529353035313532353335343535353635373538353935403541354235433544354535463547354835493550355135523553355435553556355735583559356035613562356335643565356635673568356935703571357235733574357535763577357835793580358135823583358435853586358735883589359035913592359335943595359635973598359936003601360236033604360536063607360836093610361136123613361436153616361736183619362036213622362336243625362636273628362936303631363236333634363536363637363836393640364136423643364436453646364736483649365036513652365336543655365636573658365936603661366236633664366536663667366836693670367136723673367436753676367736783679368036813682368336843685368636873688368936903691369236933694369536963697369836993700370137023703370437053706370737083709371037113712371337143715371637173718371937203721372237233724372537263727372837293730373137323733373437353736373737383739374037413742374337443745374637473748374937503751375237533754375537563757375837593760376137623763376437653766376737683769377037713772377337743775377637773778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427742784279428042814282428342844285428642874288428942904291429242934294429542964297429842994300430143024303430443054306430743084309431043114312431343144315431643174318431943204321432243234324432543264327432843294330433143324333433443354336433743384339434043414342434343444345434643474348434943504351435243534354435543564357435843594360436143624363436443654366436743684369437043714372437343744375437643774378437943804381438243834384438543864387438843894390439143924393439443954396439743984399440044014402440344044405440644074408440944104411441244134414441544164417441844194420442144224423442444254426442744284429443044314432443344344435443644374438443944404441444244434444444544464447444844494450445144524453445444554456445744584459446044614462446344644465446644674468446944704471447244734474447544764477447844794480448144824483448444854486448744884489449044914492449344944495449644974498449945004501450245034504450545064507450845094510451145124513451445154516451745184519452045214522452345244525452645274528452945304531453245334534453545364537453845394540454145424543454445454546454745484549455045514552455345544555455645574558455945604561456245634564456545664567456845694570457145724573457445754576457745784579458045814582458345844585458645874588458945904591459245934594459545964597459845994600460146024603460446054606460746084609461046114612461346144615461646174618461946204621462246234624462546264627462846294630463146324633463446354636463746384639464046414642464346444645464646474648464946504651465246534654465546564657465846594660466146624663466446654666466746684669467046714672467346744675467646774678467946804681468246834684468546864687468846894690469146924693469446954696469746984699470047014702470347044705470647074708470947104711471247134714471547164717471847194720472147224723472447254726472747284729473047314732473347344735473647374738473947404741474247434744474547464747474847494750475147524753475447554756475747584759476047614762476347644765476647674768476947704771477247734774477547764777477847794780478147824783478447854786478747884789479047914792479347944795479647974798479948004801480248034804480548064807480848094810481148124813481448154816481748184819482048214822482348244825482648274828482948304831483248334834483548364837483848394840484148424843484448454846484748484849485048514852485348544855485648574858485948604861486248634864486548664867486848694870487148724873487448754876487748784879488048814882488348844885488648874888488948904891489248934894489548964897489848994900490149024903490449054906490749084909491049114912491349144915491649174918491949204921492249234924492549264927492849294930493149324933493449354936493749384939494049414942494349444945494649474948494949504951495249534954495549564957495849594960496149624963496449654966496749684969497049714972497349744975497649774978497949804981498249834984498549864987498849894990499149924993499449954996499749984999500050015002500350045005500650075008500950105011501250135014501550165017501850195020502150225023502450255026502750285029503050315032503350345035503650375038503950405041504250435044504550465047504850495050505150525053505450555056505750585059506050615062506350645065506650675068506950705071507250735074507550765077507850795080508150825083508450855086508750885089509050915092509350945095509650975098509951005101510251035104510551065107510851095110511151125113511451155116511751185119512051215122512351245125512651275128512951305131513251335134513551365137513851395140514151425143514451455146514751485149515051515152515351545155515651575158515951605161516251635164516551665167516851695170517151725173517451755176517751785179518051815182518351845185518651875188518951905191519251935194519551965197519851995200520152025203520452055206520752085209521052115212521352145215521652175218521952205221522252235224522552265227522852295230523152325233523452355236523752385239524052415242524352445245524652475248524952505251525252535254525552565257525852595260526152625263526452655266526752685269527052715272527352745275527652775278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577757785779578057815782578357845785578657875788578957905791579257935794579557965797579857995800580158025803580458055806580758085809581058115812581358145815581658175818581958205821582258235824582558265827582858295830583158325833583458355836583758385839584058415842584358445845584658475848584958505851585258535854585558565857585858595860586158625863586458655866586758685869587058715872587358745875587658775878587958805881588258835884588558865887588858895890589158925893589458955896589758985899590059015902590359045905590659075908590959105911591259135914591559165917591859195920592159225923592459255926592759285929593059315932593359345935593659375938593959405941594259435944594559465947594859495950595159525953595459555956595759585959596059615962596359645965596659675968596959705971597259735974597559765977597859795980598159825983598459855986598759885989599059915992599359945995599659975998599960006001600260036004600560066007600860096010601160126013601460156016601760186019602060216022602360246025602660276028602960306031603260336034603560366037603860396040604160426043604460456046604760486049605060516052605360546055605660576058605960606061606260636064606560666067606860696070607160726073607460756076607760786079608060816082608360846085608660876088608960906091609260936094609560966097609860996100610161026103610461056106610761086109611061116112611361146115611661176118611961206121612261236124612561266127612861296130613161326133613461356136613761386139614061416142614361446145614661476148614961506151615261536154615561566157615861596160616161626163616461656166616761686169617061716172617361746175617661776178617961806181618261836184618561866187618861896190619161926193619461956196619761986199620062016202620362046205620662076208620962106211621262136214621562166217621862196220622162226223622462256226622762286229623062316232623362346235623662376238623962406241624262436244624562466247624862496250625162526253625462556256625762586259626062616262626362646265626662676268626962706271627262736274627562766277627862796280628162826283628462856286628762886289629062916292629362946295629662976298629963006301630263036304630563066307630863096310631163126313631463156316631763186319632063216322632363246325632663276328632963306331633263336334633563366337633863396340634163426343634463456346634763486349635063516352635363546355635663576358635963606361636263636364636563666367636863696370637163726373637463756376637763786379638063816382638363846385638663876388638963906391639263936394639563966397639863996400640164026403640464056406640764086409641064116412641364146415641664176418641964206421642264236424642564266427642864296430643164326433643464356436643764386439644064416442644364446445644664476448644964506451645264536454645564566457645864596460646164626463646464656466646764686469647064716472647364746475647664776478647964806481648264836484648564866487648864896490649164926493649464956496649764986499650065016502650365046505650665076508650965106511651265136514651565166517651865196520652165226523652465256526652765286529653065316532653365346535653665376538653965406541654265436544654565466547654865496550655165526553655465556556655765586559656065616562656365646565656665676568656965706571657265736574657565766577657865796580658165826583658465856586658765886589659065916592659365946595659665976598659966006601660266036604660566066607660866096610661166126613661466156616661766186619662066216622662366246625662666276628662966306631663266336634663566366637663866396640664166426643664466456646664766486649665066516652665366546655665666576658665966606661666266636664666566666667666866696670667166726673667466756676667766786679668066816682668366846685668666876688668966906691669266936694669566966697669866996700670167026703670467056706670767086709671067116712671367146715671667176718671967206721672267236724672567266727672867296730673167326733673467356736673767386739674067416742674367446745674667476748674967506751675267536754675567566757675867596760676167626763676467656766676767686769677067716772677367746775677667776778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219
  1. /*
  2. * linux/mm/page_alloc.c
  3. *
  4. * Manages the free list, the system allocates free pages here.
  5. * Note that kmalloc() lives in slab.c
  6. *
  7. * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
  8. * Swap reorganised 29.12.95, Stephen Tweedie
  9. * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  10. * Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
  11. * Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
  12. * Zone balancing, Kanoj Sarcar, SGI, Jan 2000
  13. * Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
  14. * (lots of bits borrowed from Ingo Molnar & Andrew Morton)
  15. */
  16. #include <linux/stddef.h>
  17. #include <linux/mm.h>
  18. #include <linux/swap.h>
  19. #include <linux/interrupt.h>
  20. #include <linux/pagemap.h>
  21. #include <linux/jiffies.h>
  22. #include <linux/bootmem.h>
  23. #include <linux/memblock.h>
  24. #include <linux/compiler.h>
  25. #include <linux/kernel.h>
  26. #include <linux/kmemcheck.h>
  27. #include <linux/kasan.h>
  28. #include <linux/module.h>
  29. #include <linux/suspend.h>
  30. #include <linux/pagevec.h>
  31. #include <linux/blkdev.h>
  32. #include <linux/slab.h>
  33. #include <linux/ratelimit.h>
  34. #include <linux/oom.h>
  35. #include <linux/notifier.h>
  36. #include <linux/topology.h>
  37. #include <linux/sysctl.h>
  38. #include <linux/cpu.h>
  39. #include <linux/cpuset.h>
  40. #include <linux/memory_hotplug.h>
  41. #include <linux/nodemask.h>
  42. #include <linux/vmalloc.h>
  43. #include <linux/vmstat.h>
  44. #include <linux/mempolicy.h>
  45. #include <linux/memremap.h>
  46. #include <linux/stop_machine.h>
  47. #include <linux/sort.h>
  48. #include <linux/pfn.h>
  49. #include <linux/backing-dev.h>
  50. #include <linux/fault-inject.h>
  51. #include <linux/page-isolation.h>
  52. #include <linux/page_ext.h>
  53. #include <linux/debugobjects.h>
  54. #include <linux/kmemleak.h>
  55. #include <linux/compaction.h>
  56. #include <trace/events/kmem.h>
  57. #include <linux/prefetch.h>
  58. #include <linux/mm_inline.h>
  59. #include <linux/migrate.h>
  60. #include <linux/page_ext.h>
  61. #include <linux/hugetlb.h>
  62. #include <linux/sched/rt.h>
  63. #include <linux/page_owner.h>
  64. #include <linux/kthread.h>
  65. #include <asm/sections.h>
  66. #include <asm/tlbflush.h>
  67. #include <asm/div64.h>
  68. #include "internal.h"
  69. /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
  70. static DEFINE_MUTEX(pcp_batch_high_lock);
  71. #define MIN_PERCPU_PAGELIST_FRACTION (8)
  72. #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
  73. DEFINE_PER_CPU(int, numa_node);
  74. EXPORT_PER_CPU_SYMBOL(numa_node);
  75. #endif
  76. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  77. /*
  78. * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
  79. * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
  80. * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
  81. * defined in <linux/topology.h>.
  82. */
  83. DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */
  84. EXPORT_PER_CPU_SYMBOL(_numa_mem_);
  85. int _node_numa_mem_[MAX_NUMNODES];
  86. #endif
  87. /*
  88. * Array of node states.
  89. */
  90. nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
  91. [N_POSSIBLE] = NODE_MASK_ALL,
  92. [N_ONLINE] = { { [0] = 1UL } },
  93. #ifndef CONFIG_NUMA
  94. [N_NORMAL_MEMORY] = { { [0] = 1UL } },
  95. #ifdef CONFIG_HIGHMEM
  96. [N_HIGH_MEMORY] = { { [0] = 1UL } },
  97. #endif
  98. #ifdef CONFIG_MOVABLE_NODE
  99. [N_MEMORY] = { { [0] = 1UL } },
  100. #endif
  101. [N_CPU] = { { [0] = 1UL } },
  102. #endif /* NUMA */
  103. };
  104. EXPORT_SYMBOL(node_states);
  105. /* Protect totalram_pages and zone->managed_pages */
  106. static DEFINE_SPINLOCK(managed_page_count_lock);
  107. unsigned long totalram_pages __read_mostly;
  108. unsigned long totalreserve_pages __read_mostly;
  109. unsigned long totalcma_pages __read_mostly;
  110. int percpu_pagelist_fraction;
  111. gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
  112. /*
  113. * A cached value of the page's pageblock's migratetype, used when the page is
  114. * put on a pcplist. Used to avoid the pageblock migratetype lookup when
  115. * freeing from pcplists in most cases, at the cost of possibly becoming stale.
  116. * Also the migratetype set in the page does not necessarily match the pcplist
  117. * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
  118. * other index - this ensures that it will be put on the correct CMA freelist.
  119. */
  120. static inline int get_pcppage_migratetype(struct page *page)
  121. {
  122. return page->index;
  123. }
  124. static inline void set_pcppage_migratetype(struct page *page, int migratetype)
  125. {
  126. page->index = migratetype;
  127. }
  128. #ifdef CONFIG_PM_SLEEP
  129. /*
  130. * The following functions are used by the suspend/hibernate code to temporarily
  131. * change gfp_allowed_mask in order to avoid using I/O during memory allocations
  132. * while devices are suspended. To avoid races with the suspend/hibernate code,
  133. * they should always be called with pm_mutex held (gfp_allowed_mask also should
  134. * only be modified with pm_mutex held, unless the suspend/hibernate code is
  135. * guaranteed not to run in parallel with that modification).
  136. */
  137. static gfp_t saved_gfp_mask;
  138. void pm_restore_gfp_mask(void)
  139. {
  140. WARN_ON(!mutex_is_locked(&pm_mutex));
  141. if (saved_gfp_mask) {
  142. gfp_allowed_mask = saved_gfp_mask;
  143. saved_gfp_mask = 0;
  144. }
  145. }
  146. void pm_restrict_gfp_mask(void)
  147. {
  148. WARN_ON(!mutex_is_locked(&pm_mutex));
  149. WARN_ON(saved_gfp_mask);
  150. saved_gfp_mask = gfp_allowed_mask;
  151. gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
  152. }
  153. bool pm_suspended_storage(void)
  154. {
  155. if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
  156. return false;
  157. return true;
  158. }
  159. #endif /* CONFIG_PM_SLEEP */
  160. #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
  161. unsigned int pageblock_order __read_mostly;
  162. #endif
  163. static void __free_pages_ok(struct page *page, unsigned int order);
  164. /*
  165. * results with 256, 32 in the lowmem_reserve sysctl:
  166. * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
  167. * 1G machine -> (16M dma, 784M normal, 224M high)
  168. * NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
  169. * HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
  170. * HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
  171. *
  172. * TBD: should special case ZONE_DMA32 machines here - in those we normally
  173. * don't need any ZONE_NORMAL reservation
  174. */
  175. int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1] = {
  176. #ifdef CONFIG_ZONE_DMA
  177. 256,
  178. #endif
  179. #ifdef CONFIG_ZONE_DMA32
  180. 256,
  181. #endif
  182. #ifdef CONFIG_HIGHMEM
  183. 32,
  184. #endif
  185. 32,
  186. };
  187. EXPORT_SYMBOL(totalram_pages);
  188. static char * const zone_names[MAX_NR_ZONES] = {
  189. #ifdef CONFIG_ZONE_DMA
  190. "DMA",
  191. #endif
  192. #ifdef CONFIG_ZONE_DMA32
  193. "DMA32",
  194. #endif
  195. "Normal",
  196. #ifdef CONFIG_HIGHMEM
  197. "HighMem",
  198. #endif
  199. "Movable",
  200. #ifdef CONFIG_ZONE_DEVICE
  201. "Device",
  202. #endif
  203. };
  204. char * const migratetype_names[MIGRATE_TYPES] = {
  205. "Unmovable",
  206. "Movable",
  207. "Reclaimable",
  208. "HighAtomic",
  209. #ifdef CONFIG_CMA
  210. "CMA",
  211. #endif
  212. #ifdef CONFIG_MEMORY_ISOLATION
  213. "Isolate",
  214. #endif
  215. };
  216. compound_page_dtor * const compound_page_dtors[] = {
  217. NULL,
  218. free_compound_page,
  219. #ifdef CONFIG_HUGETLB_PAGE
  220. free_huge_page,
  221. #endif
  222. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  223. free_transhuge_page,
  224. #endif
  225. };
  226. int min_free_kbytes = 1024;
  227. int user_min_free_kbytes = -1;
  228. int watermark_scale_factor = 10;
  229. static unsigned long __meminitdata nr_kernel_pages;
  230. static unsigned long __meminitdata nr_all_pages;
  231. static unsigned long __meminitdata dma_reserve;
  232. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  233. static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
  234. static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
  235. static unsigned long __initdata required_kernelcore;
  236. static unsigned long __initdata required_movablecore;
  237. static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
  238. static bool mirrored_kernelcore;
  239. /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
  240. int movable_zone;
  241. EXPORT_SYMBOL(movable_zone);
  242. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  243. #if MAX_NUMNODES > 1
  244. int nr_node_ids __read_mostly = MAX_NUMNODES;
  245. int nr_online_nodes __read_mostly = 1;
  246. EXPORT_SYMBOL(nr_node_ids);
  247. EXPORT_SYMBOL(nr_online_nodes);
  248. #endif
  249. int page_group_by_mobility_disabled __read_mostly;
  250. #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
  251. static inline void reset_deferred_meminit(pg_data_t *pgdat)
  252. {
  253. pgdat->first_deferred_pfn = ULONG_MAX;
  254. }
  255. /* Returns true if the struct page for the pfn is uninitialised */
  256. static inline bool __meminit early_page_uninitialised(unsigned long pfn)
  257. {
  258. if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
  259. return true;
  260. return false;
  261. }
  262. static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
  263. {
  264. if (pfn >= NODE_DATA(nid)->first_deferred_pfn)
  265. return true;
  266. return false;
  267. }
  268. /*
  269. * Returns false when the remaining initialisation should be deferred until
  270. * later in the boot cycle when it can be parallelised.
  271. */
  272. static inline bool update_defer_init(pg_data_t *pgdat,
  273. unsigned long pfn, unsigned long zone_end,
  274. unsigned long *nr_initialised)
  275. {
  276. /* Always populate low zones for address-contrained allocations */
  277. if (zone_end < pgdat_end_pfn(pgdat))
  278. return true;
  279. /* Initialise at least 2G of the highest zone */
  280. (*nr_initialised)++;
  281. if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
  282. (pfn & (PAGES_PER_SECTION - 1)) == 0) {
  283. pgdat->first_deferred_pfn = pfn;
  284. return false;
  285. }
  286. return true;
  287. }
  288. #else
  289. static inline void reset_deferred_meminit(pg_data_t *pgdat)
  290. {
  291. }
  292. static inline bool early_page_uninitialised(unsigned long pfn)
  293. {
  294. return false;
  295. }
  296. static inline bool early_page_nid_uninitialised(unsigned long pfn, int nid)
  297. {
  298. return false;
  299. }
  300. static inline bool update_defer_init(pg_data_t *pgdat,
  301. unsigned long pfn, unsigned long zone_end,
  302. unsigned long *nr_initialised)
  303. {
  304. return true;
  305. }
  306. #endif
  307. void set_pageblock_migratetype(struct page *page, int migratetype)
  308. {
  309. if (unlikely(page_group_by_mobility_disabled &&
  310. migratetype < MIGRATE_PCPTYPES))
  311. migratetype = MIGRATE_UNMOVABLE;
  312. set_pageblock_flags_group(page, (unsigned long)migratetype,
  313. PB_migrate, PB_migrate_end);
  314. }
  315. #ifdef CONFIG_DEBUG_VM
  316. static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
  317. {
  318. int ret = 0;
  319. unsigned seq;
  320. unsigned long pfn = page_to_pfn(page);
  321. unsigned long sp, start_pfn;
  322. do {
  323. seq = zone_span_seqbegin(zone);
  324. start_pfn = zone->zone_start_pfn;
  325. sp = zone->spanned_pages;
  326. if (!zone_spans_pfn(zone, pfn))
  327. ret = 1;
  328. } while (zone_span_seqretry(zone, seq));
  329. if (ret)
  330. pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
  331. pfn, zone_to_nid(zone), zone->name,
  332. start_pfn, start_pfn + sp);
  333. return ret;
  334. }
  335. static int page_is_consistent(struct zone *zone, struct page *page)
  336. {
  337. if (!pfn_valid_within(page_to_pfn(page)))
  338. return 0;
  339. if (zone != page_zone(page))
  340. return 0;
  341. return 1;
  342. }
  343. /*
  344. * Temporary debugging check for pages not lying within a given zone.
  345. */
  346. static int bad_range(struct zone *zone, struct page *page)
  347. {
  348. if (page_outside_zone_boundaries(zone, page))
  349. return 1;
  350. if (!page_is_consistent(zone, page))
  351. return 1;
  352. return 0;
  353. }
  354. #else
  355. static inline int bad_range(struct zone *zone, struct page *page)
  356. {
  357. return 0;
  358. }
  359. #endif
  360. static void bad_page(struct page *page, const char *reason,
  361. unsigned long bad_flags)
  362. {
  363. static unsigned long resume;
  364. static unsigned long nr_shown;
  365. static unsigned long nr_unshown;
  366. /* Don't complain about poisoned pages */
  367. if (PageHWPoison(page)) {
  368. page_mapcount_reset(page); /* remove PageBuddy */
  369. return;
  370. }
  371. /*
  372. * Allow a burst of 60 reports, then keep quiet for that minute;
  373. * or allow a steady drip of one report per second.
  374. */
  375. if (nr_shown == 60) {
  376. if (time_before(jiffies, resume)) {
  377. nr_unshown++;
  378. goto out;
  379. }
  380. if (nr_unshown) {
  381. pr_alert(
  382. "BUG: Bad page state: %lu messages suppressed\n",
  383. nr_unshown);
  384. nr_unshown = 0;
  385. }
  386. nr_shown = 0;
  387. }
  388. if (nr_shown++ == 0)
  389. resume = jiffies + 60 * HZ;
  390. pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
  391. current->comm, page_to_pfn(page));
  392. __dump_page(page, reason);
  393. bad_flags &= page->flags;
  394. if (bad_flags)
  395. pr_alert("bad because of flags: %#lx(%pGp)\n",
  396. bad_flags, &bad_flags);
  397. dump_page_owner(page);
  398. print_modules();
  399. dump_stack();
  400. out:
  401. /* Leave bad fields for debug, except PageBuddy could make trouble */
  402. page_mapcount_reset(page); /* remove PageBuddy */
  403. add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
  404. }
  405. /*
  406. * Higher-order pages are called "compound pages". They are structured thusly:
  407. *
  408. * The first PAGE_SIZE page is called the "head page" and have PG_head set.
  409. *
  410. * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
  411. * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
  412. *
  413. * The first tail page's ->compound_dtor holds the offset in array of compound
  414. * page destructors. See compound_page_dtors.
  415. *
  416. * The first tail page's ->compound_order holds the order of allocation.
  417. * This usage means that zero-order pages may not be compound.
  418. */
  419. void free_compound_page(struct page *page)
  420. {
  421. __free_pages_ok(page, compound_order(page));
  422. }
  423. void prep_compound_page(struct page *page, unsigned int order)
  424. {
  425. int i;
  426. int nr_pages = 1 << order;
  427. set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
  428. set_compound_order(page, order);
  429. __SetPageHead(page);
  430. for (i = 1; i < nr_pages; i++) {
  431. struct page *p = page + i;
  432. set_page_count(p, 0);
  433. p->mapping = TAIL_MAPPING;
  434. set_compound_head(p, page);
  435. }
  436. atomic_set(compound_mapcount_ptr(page), -1);
  437. }
  438. #ifdef CONFIG_DEBUG_PAGEALLOC
  439. unsigned int _debug_guardpage_minorder;
  440. bool _debug_pagealloc_enabled __read_mostly
  441. = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
  442. EXPORT_SYMBOL(_debug_pagealloc_enabled);
  443. bool _debug_guardpage_enabled __read_mostly;
  444. static int __init early_debug_pagealloc(char *buf)
  445. {
  446. if (!buf)
  447. return -EINVAL;
  448. if (strcmp(buf, "on") == 0)
  449. _debug_pagealloc_enabled = true;
  450. if (strcmp(buf, "off") == 0)
  451. _debug_pagealloc_enabled = false;
  452. return 0;
  453. }
  454. early_param("debug_pagealloc", early_debug_pagealloc);
  455. static bool need_debug_guardpage(void)
  456. {
  457. /* If we don't use debug_pagealloc, we don't need guard page */
  458. if (!debug_pagealloc_enabled())
  459. return false;
  460. return true;
  461. }
  462. static void init_debug_guardpage(void)
  463. {
  464. if (!debug_pagealloc_enabled())
  465. return;
  466. _debug_guardpage_enabled = true;
  467. }
  468. struct page_ext_operations debug_guardpage_ops = {
  469. .need = need_debug_guardpage,
  470. .init = init_debug_guardpage,
  471. };
  472. static int __init debug_guardpage_minorder_setup(char *buf)
  473. {
  474. unsigned long res;
  475. if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
  476. pr_err("Bad debug_guardpage_minorder value\n");
  477. return 0;
  478. }
  479. _debug_guardpage_minorder = res;
  480. pr_info("Setting debug_guardpage_minorder to %lu\n", res);
  481. return 0;
  482. }
  483. __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
  484. static inline void set_page_guard(struct zone *zone, struct page *page,
  485. unsigned int order, int migratetype)
  486. {
  487. struct page_ext *page_ext;
  488. if (!debug_guardpage_enabled())
  489. return;
  490. page_ext = lookup_page_ext(page);
  491. __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
  492. INIT_LIST_HEAD(&page->lru);
  493. set_page_private(page, order);
  494. /* Guard pages are not available for any usage */
  495. __mod_zone_freepage_state(zone, -(1 << order), migratetype);
  496. }
  497. static inline void clear_page_guard(struct zone *zone, struct page *page,
  498. unsigned int order, int migratetype)
  499. {
  500. struct page_ext *page_ext;
  501. if (!debug_guardpage_enabled())
  502. return;
  503. page_ext = lookup_page_ext(page);
  504. __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
  505. set_page_private(page, 0);
  506. if (!is_migrate_isolate(migratetype))
  507. __mod_zone_freepage_state(zone, (1 << order), migratetype);
  508. }
  509. #else
  510. struct page_ext_operations debug_guardpage_ops = { NULL, };
  511. static inline void set_page_guard(struct zone *zone, struct page *page,
  512. unsigned int order, int migratetype) {}
  513. static inline void clear_page_guard(struct zone *zone, struct page *page,
  514. unsigned int order, int migratetype) {}
  515. #endif
  516. static inline void set_page_order(struct page *page, unsigned int order)
  517. {
  518. set_page_private(page, order);
  519. __SetPageBuddy(page);
  520. }
  521. static inline void rmv_page_order(struct page *page)
  522. {
  523. __ClearPageBuddy(page);
  524. set_page_private(page, 0);
  525. }
  526. /*
  527. * This function checks whether a page is free && is the buddy
  528. * we can do coalesce a page and its buddy if
  529. * (a) the buddy is not in a hole &&
  530. * (b) the buddy is in the buddy system &&
  531. * (c) a page and its buddy have the same order &&
  532. * (d) a page and its buddy are in the same zone.
  533. *
  534. * For recording whether a page is in the buddy system, we set ->_mapcount
  535. * PAGE_BUDDY_MAPCOUNT_VALUE.
  536. * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
  537. * serialized by zone->lock.
  538. *
  539. * For recording page's order, we use page_private(page).
  540. */
  541. static inline int page_is_buddy(struct page *page, struct page *buddy,
  542. unsigned int order)
  543. {
  544. if (!pfn_valid_within(page_to_pfn(buddy)))
  545. return 0;
  546. if (page_is_guard(buddy) && page_order(buddy) == order) {
  547. if (page_zone_id(page) != page_zone_id(buddy))
  548. return 0;
  549. VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
  550. return 1;
  551. }
  552. if (PageBuddy(buddy) && page_order(buddy) == order) {
  553. /*
  554. * zone check is done late to avoid uselessly
  555. * calculating zone/node ids for pages that could
  556. * never merge.
  557. */
  558. if (page_zone_id(page) != page_zone_id(buddy))
  559. return 0;
  560. VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);
  561. return 1;
  562. }
  563. return 0;
  564. }
  565. /*
  566. * Freeing function for a buddy system allocator.
  567. *
  568. * The concept of a buddy system is to maintain direct-mapped table
  569. * (containing bit values) for memory blocks of various "orders".
  570. * The bottom level table contains the map for the smallest allocatable
  571. * units of memory (here, pages), and each level above it describes
  572. * pairs of units from the levels below, hence, "buddies".
  573. * At a high level, all that happens here is marking the table entry
  574. * at the bottom level available, and propagating the changes upward
  575. * as necessary, plus some accounting needed to play nicely with other
  576. * parts of the VM system.
  577. * At each level, we keep a list of pages, which are heads of continuous
  578. * free pages of length of (1 << order) and marked with _mapcount
  579. * PAGE_BUDDY_MAPCOUNT_VALUE. Page's order is recorded in page_private(page)
  580. * field.
  581. * So when we are allocating or freeing one, we can derive the state of the
  582. * other. That is, if we allocate a small block, and both were
  583. * free, the remainder of the region must be split into blocks.
  584. * If a block is freed, and its buddy is also free, then this
  585. * triggers coalescing into a block of larger size.
  586. *
  587. * -- nyc
  588. */
  589. static inline void __free_one_page(struct page *page,
  590. unsigned long pfn,
  591. struct zone *zone, unsigned int order,
  592. int migratetype)
  593. {
  594. unsigned long page_idx;
  595. unsigned long combined_idx;
  596. unsigned long uninitialized_var(buddy_idx);
  597. struct page *buddy;
  598. unsigned int max_order = MAX_ORDER;
  599. VM_BUG_ON(!zone_is_initialized(zone));
  600. VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);
  601. VM_BUG_ON(migratetype == -1);
  602. if (is_migrate_isolate(migratetype)) {
  603. /*
  604. * We restrict max order of merging to prevent merge
  605. * between freepages on isolate pageblock and normal
  606. * pageblock. Without this, pageblock isolation
  607. * could cause incorrect freepage accounting.
  608. */
  609. max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);
  610. } else {
  611. __mod_zone_freepage_state(zone, 1 << order, migratetype);
  612. }
  613. page_idx = pfn & ((1 << max_order) - 1);
  614. VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
  615. VM_BUG_ON_PAGE(bad_range(zone, page), page);
  616. while (order < max_order - 1) {
  617. buddy_idx = __find_buddy_index(page_idx, order);
  618. buddy = page + (buddy_idx - page_idx);
  619. if (!page_is_buddy(page, buddy, order))
  620. break;
  621. /*
  622. * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
  623. * merge with it and move up one order.
  624. */
  625. if (page_is_guard(buddy)) {
  626. clear_page_guard(zone, buddy, order, migratetype);
  627. } else {
  628. list_del(&buddy->lru);
  629. zone->free_area[order].nr_free--;
  630. rmv_page_order(buddy);
  631. }
  632. combined_idx = buddy_idx & page_idx;
  633. page = page + (combined_idx - page_idx);
  634. page_idx = combined_idx;
  635. order++;
  636. }
  637. set_page_order(page, order);
  638. /*
  639. * If this is not the largest possible page, check if the buddy
  640. * of the next-highest order is free. If it is, it's possible
  641. * that pages are being freed that will coalesce soon. In case,
  642. * that is happening, add the free page to the tail of the list
  643. * so it's less likely to be used soon and more likely to be merged
  644. * as a higher order page
  645. */
  646. if ((order < MAX_ORDER-2) && pfn_valid_within(page_to_pfn(buddy))) {
  647. struct page *higher_page, *higher_buddy;
  648. combined_idx = buddy_idx & page_idx;
  649. higher_page = page + (combined_idx - page_idx);
  650. buddy_idx = __find_buddy_index(combined_idx, order + 1);
  651. higher_buddy = higher_page + (buddy_idx - combined_idx);
  652. if (page_is_buddy(higher_page, higher_buddy, order + 1)) {
  653. list_add_tail(&page->lru,
  654. &zone->free_area[order].free_list[migratetype]);
  655. goto out;
  656. }
  657. }
  658. list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
  659. out:
  660. zone->free_area[order].nr_free++;
  661. }
  662. static inline int free_pages_check(struct page *page)
  663. {
  664. const char *bad_reason = NULL;
  665. unsigned long bad_flags = 0;
  666. if (unlikely(atomic_read(&page->_mapcount) != -1))
  667. bad_reason = "nonzero mapcount";
  668. if (unlikely(page->mapping != NULL))
  669. bad_reason = "non-NULL mapping";
  670. if (unlikely(page_ref_count(page) != 0))
  671. bad_reason = "nonzero _count";
  672. if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
  673. bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
  674. bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
  675. }
  676. #ifdef CONFIG_MEMCG
  677. if (unlikely(page->mem_cgroup))
  678. bad_reason = "page still charged to cgroup";
  679. #endif
  680. if (unlikely(bad_reason)) {
  681. bad_page(page, bad_reason, bad_flags);
  682. return 1;
  683. }
  684. page_cpupid_reset_last(page);
  685. if (page->flags & PAGE_FLAGS_CHECK_AT_PREP)
  686. page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
  687. return 0;
  688. }
  689. /*
  690. * Frees a number of pages from the PCP lists
  691. * Assumes all pages on list are in same zone, and of same order.
  692. * count is the number of pages to free.
  693. *
  694. * If the zone was previously in an "all pages pinned" state then look to
  695. * see if this freeing clears that state.
  696. *
  697. * And clear the zone's pages_scanned counter, to hold off the "all pages are
  698. * pinned" detection logic.
  699. */
  700. static void free_pcppages_bulk(struct zone *zone, int count,
  701. struct per_cpu_pages *pcp)
  702. {
  703. int migratetype = 0;
  704. int batch_free = 0;
  705. int to_free = count;
  706. unsigned long nr_scanned;
  707. spin_lock(&zone->lock);
  708. nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
  709. if (nr_scanned)
  710. __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
  711. while (to_free) {
  712. struct page *page;
  713. struct list_head *list;
  714. /*
  715. * Remove pages from lists in a round-robin fashion. A
  716. * batch_free count is maintained that is incremented when an
  717. * empty list is encountered. This is so more pages are freed
  718. * off fuller lists instead of spinning excessively around empty
  719. * lists
  720. */
  721. do {
  722. batch_free++;
  723. if (++migratetype == MIGRATE_PCPTYPES)
  724. migratetype = 0;
  725. list = &pcp->lists[migratetype];
  726. } while (list_empty(list));
  727. /* This is the only non-empty list. Free them all. */
  728. if (batch_free == MIGRATE_PCPTYPES)
  729. batch_free = to_free;
  730. do {
  731. int mt; /* migratetype of the to-be-freed page */
  732. page = list_last_entry(list, struct page, lru);
  733. /* must delete as __free_one_page list manipulates */
  734. list_del(&page->lru);
  735. mt = get_pcppage_migratetype(page);
  736. /* MIGRATE_ISOLATE page should not go to pcplists */
  737. VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
  738. /* Pageblock could have been isolated meanwhile */
  739. if (unlikely(has_isolate_pageblock(zone)))
  740. mt = get_pageblock_migratetype(page);
  741. __free_one_page(page, page_to_pfn(page), zone, 0, mt);
  742. trace_mm_page_pcpu_drain(page, 0, mt);
  743. } while (--to_free && --batch_free && !list_empty(list));
  744. }
  745. spin_unlock(&zone->lock);
  746. }
  747. static void free_one_page(struct zone *zone,
  748. struct page *page, unsigned long pfn,
  749. unsigned int order,
  750. int migratetype)
  751. {
  752. unsigned long nr_scanned;
  753. spin_lock(&zone->lock);
  754. nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED);
  755. if (nr_scanned)
  756. __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
  757. if (unlikely(has_isolate_pageblock(zone) ||
  758. is_migrate_isolate(migratetype))) {
  759. migratetype = get_pfnblock_migratetype(page, pfn);
  760. }
  761. __free_one_page(page, pfn, zone, order, migratetype);
  762. spin_unlock(&zone->lock);
  763. }
  764. static int free_tail_pages_check(struct page *head_page, struct page *page)
  765. {
  766. int ret = 1;
  767. /*
  768. * We rely page->lru.next never has bit 0 set, unless the page
  769. * is PageTail(). Let's make sure that's true even for poisoned ->lru.
  770. */
  771. BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
  772. if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
  773. ret = 0;
  774. goto out;
  775. }
  776. switch (page - head_page) {
  777. case 1:
  778. /* the first tail page: ->mapping is compound_mapcount() */
  779. if (unlikely(compound_mapcount(page))) {
  780. bad_page(page, "nonzero compound_mapcount", 0);
  781. goto out;
  782. }
  783. break;
  784. case 2:
  785. /*
  786. * the second tail page: ->mapping is
  787. * page_deferred_list().next -- ignore value.
  788. */
  789. break;
  790. default:
  791. if (page->mapping != TAIL_MAPPING) {
  792. bad_page(page, "corrupted mapping in tail page", 0);
  793. goto out;
  794. }
  795. break;
  796. }
  797. if (unlikely(!PageTail(page))) {
  798. bad_page(page, "PageTail not set", 0);
  799. goto out;
  800. }
  801. if (unlikely(compound_head(page) != head_page)) {
  802. bad_page(page, "compound_head not consistent", 0);
  803. goto out;
  804. }
  805. ret = 0;
  806. out:
  807. page->mapping = NULL;
  808. clear_compound_head(page);
  809. return ret;
  810. }
  811. static void __meminit __init_single_page(struct page *page, unsigned long pfn,
  812. unsigned long zone, int nid)
  813. {
  814. set_page_links(page, zone, nid, pfn);
  815. init_page_count(page);
  816. page_mapcount_reset(page);
  817. page_cpupid_reset_last(page);
  818. INIT_LIST_HEAD(&page->lru);
  819. #ifdef WANT_PAGE_VIRTUAL
  820. /* The shift won't overflow because ZONE_NORMAL is below 4G. */
  821. if (!is_highmem_idx(zone))
  822. set_page_address(page, __va(pfn << PAGE_SHIFT));
  823. #endif
  824. }
  825. static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
  826. int nid)
  827. {
  828. return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
  829. }
  830. #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
  831. static void init_reserved_page(unsigned long pfn)
  832. {
  833. pg_data_t *pgdat;
  834. int nid, zid;
  835. if (!early_page_uninitialised(pfn))
  836. return;
  837. nid = early_pfn_to_nid(pfn);
  838. pgdat = NODE_DATA(nid);
  839. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  840. struct zone *zone = &pgdat->node_zones[zid];
  841. if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
  842. break;
  843. }
  844. __init_single_pfn(pfn, zid, nid);
  845. }
  846. #else
  847. static inline void init_reserved_page(unsigned long pfn)
  848. {
  849. }
  850. #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
  851. /*
  852. * Initialised pages do not have PageReserved set. This function is
  853. * called for each range allocated by the bootmem allocator and
  854. * marks the pages PageReserved. The remaining valid pages are later
  855. * sent to the buddy page allocator.
  856. */
  857. void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
  858. {
  859. unsigned long start_pfn = PFN_DOWN(start);
  860. unsigned long end_pfn = PFN_UP(end);
  861. for (; start_pfn < end_pfn; start_pfn++) {
  862. if (pfn_valid(start_pfn)) {
  863. struct page *page = pfn_to_page(start_pfn);
  864. init_reserved_page(start_pfn);
  865. /* Avoid false-positive PageTail() */
  866. INIT_LIST_HEAD(&page->lru);
  867. SetPageReserved(page);
  868. }
  869. }
  870. }
  871. static bool free_pages_prepare(struct page *page, unsigned int order)
  872. {
  873. bool compound = PageCompound(page);
  874. int i, bad = 0;
  875. VM_BUG_ON_PAGE(PageTail(page), page);
  876. VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);
  877. trace_mm_page_free(page, order);
  878. kmemcheck_free_shadow(page, order);
  879. kasan_free_pages(page, order);
  880. if (PageAnon(page))
  881. page->mapping = NULL;
  882. bad += free_pages_check(page);
  883. for (i = 1; i < (1 << order); i++) {
  884. if (compound)
  885. bad += free_tail_pages_check(page, page + i);
  886. bad += free_pages_check(page + i);
  887. }
  888. if (bad)
  889. return false;
  890. reset_page_owner(page, order);
  891. if (!PageHighMem(page)) {
  892. debug_check_no_locks_freed(page_address(page),
  893. PAGE_SIZE << order);
  894. debug_check_no_obj_freed(page_address(page),
  895. PAGE_SIZE << order);
  896. }
  897. arch_free_page(page, order);
  898. kernel_poison_pages(page, 1 << order, 0);
  899. kernel_map_pages(page, 1 << order, 0);
  900. return true;
  901. }
  902. static void __free_pages_ok(struct page *page, unsigned int order)
  903. {
  904. unsigned long flags;
  905. int migratetype;
  906. unsigned long pfn = page_to_pfn(page);
  907. if (!free_pages_prepare(page, order))
  908. return;
  909. migratetype = get_pfnblock_migratetype(page, pfn);
  910. local_irq_save(flags);
  911. __count_vm_events(PGFREE, 1 << order);
  912. free_one_page(page_zone(page), page, pfn, order, migratetype);
  913. local_irq_restore(flags);
  914. }
  915. static void __init __free_pages_boot_core(struct page *page,
  916. unsigned long pfn, unsigned int order)
  917. {
  918. unsigned int nr_pages = 1 << order;
  919. struct page *p = page;
  920. unsigned int loop;
  921. prefetchw(p);
  922. for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
  923. prefetchw(p + 1);
  924. __ClearPageReserved(p);
  925. set_page_count(p, 0);
  926. }
  927. __ClearPageReserved(p);
  928. set_page_count(p, 0);
  929. page_zone(page)->managed_pages += nr_pages;
  930. set_page_refcounted(page);
  931. __free_pages(page, order);
  932. }
  933. #if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
  934. defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
  935. static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
  936. int __meminit early_pfn_to_nid(unsigned long pfn)
  937. {
  938. static DEFINE_SPINLOCK(early_pfn_lock);
  939. int nid;
  940. spin_lock(&early_pfn_lock);
  941. nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
  942. if (nid < 0)
  943. nid = 0;
  944. spin_unlock(&early_pfn_lock);
  945. return nid;
  946. }
  947. #endif
  948. #ifdef CONFIG_NODES_SPAN_OTHER_NODES
  949. static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
  950. struct mminit_pfnnid_cache *state)
  951. {
  952. int nid;
  953. nid = __early_pfn_to_nid(pfn, state);
  954. if (nid >= 0 && nid != node)
  955. return false;
  956. return true;
  957. }
  958. /* Only safe to use early in boot when initialisation is single-threaded */
  959. static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
  960. {
  961. return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
  962. }
  963. #else
  964. static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
  965. {
  966. return true;
  967. }
  968. static inline bool __meminit meminit_pfn_in_nid(unsigned long pfn, int node,
  969. struct mminit_pfnnid_cache *state)
  970. {
  971. return true;
  972. }
  973. #endif
  974. void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
  975. unsigned int order)
  976. {
  977. if (early_page_uninitialised(pfn))
  978. return;
  979. return __free_pages_boot_core(page, pfn, order);
  980. }
  981. /*
  982. * Check that the whole (or subset of) a pageblock given by the interval of
  983. * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
  984. * with the migration of free compaction scanner. The scanners then need to
  985. * use only pfn_valid_within() check for arches that allow holes within
  986. * pageblocks.
  987. *
  988. * Return struct page pointer of start_pfn, or NULL if checks were not passed.
  989. *
  990. * It's possible on some configurations to have a setup like node0 node1 node0
  991. * i.e. it's possible that all pages within a zones range of pages do not
  992. * belong to a single zone. We assume that a border between node0 and node1
  993. * can occur within a single pageblock, but not a node0 node1 node0
  994. * interleaving within a single pageblock. It is therefore sufficient to check
  995. * the first and last page of a pageblock and avoid checking each individual
  996. * page in a pageblock.
  997. */
  998. struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
  999. unsigned long end_pfn, struct zone *zone)
  1000. {
  1001. struct page *start_page;
  1002. struct page *end_page;
  1003. /* end_pfn is one past the range we are checking */
  1004. end_pfn--;
  1005. if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
  1006. return NULL;
  1007. start_page = pfn_to_page(start_pfn);
  1008. if (page_zone(start_page) != zone)
  1009. return NULL;
  1010. end_page = pfn_to_page(end_pfn);
  1011. /* This gives a shorter code than deriving page_zone(end_page) */
  1012. if (page_zone_id(start_page) != page_zone_id(end_page))
  1013. return NULL;
  1014. return start_page;
  1015. }
  1016. void set_zone_contiguous(struct zone *zone)
  1017. {
  1018. unsigned long block_start_pfn = zone->zone_start_pfn;
  1019. unsigned long block_end_pfn;
  1020. block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
  1021. for (; block_start_pfn < zone_end_pfn(zone);
  1022. block_start_pfn = block_end_pfn,
  1023. block_end_pfn += pageblock_nr_pages) {
  1024. block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));
  1025. if (!__pageblock_pfn_to_page(block_start_pfn,
  1026. block_end_pfn, zone))
  1027. return;
  1028. }
  1029. /* We confirm that there is no hole */
  1030. zone->contiguous = true;
  1031. }
  1032. void clear_zone_contiguous(struct zone *zone)
  1033. {
  1034. zone->contiguous = false;
  1035. }
  1036. #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
  1037. static void __init deferred_free_range(struct page *page,
  1038. unsigned long pfn, int nr_pages)
  1039. {
  1040. int i;
  1041. if (!page)
  1042. return;
  1043. /* Free a large naturally-aligned chunk if possible */
  1044. if (nr_pages == MAX_ORDER_NR_PAGES &&
  1045. (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
  1046. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  1047. __free_pages_boot_core(page, pfn, MAX_ORDER-1);
  1048. return;
  1049. }
  1050. for (i = 0; i < nr_pages; i++, page++, pfn++)
  1051. __free_pages_boot_core(page, pfn, 0);
  1052. }
  1053. /* Completion tracking for deferred_init_memmap() threads */
  1054. static atomic_t pgdat_init_n_undone __initdata;
  1055. static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
  1056. static inline void __init pgdat_init_report_one_done(void)
  1057. {
  1058. if (atomic_dec_and_test(&pgdat_init_n_undone))
  1059. complete(&pgdat_init_all_done_comp);
  1060. }
  1061. /* Initialise remaining memory on a node */
  1062. static int __init deferred_init_memmap(void *data)
  1063. {
  1064. pg_data_t *pgdat = data;
  1065. int nid = pgdat->node_id;
  1066. struct mminit_pfnnid_cache nid_init_state = { };
  1067. unsigned long start = jiffies;
  1068. unsigned long nr_pages = 0;
  1069. unsigned long walk_start, walk_end;
  1070. int i, zid;
  1071. struct zone *zone;
  1072. unsigned long first_init_pfn = pgdat->first_deferred_pfn;
  1073. const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
  1074. if (first_init_pfn == ULONG_MAX) {
  1075. pgdat_init_report_one_done();
  1076. return 0;
  1077. }
  1078. /* Bind memory initialisation thread to a local node if possible */
  1079. if (!cpumask_empty(cpumask))
  1080. set_cpus_allowed_ptr(current, cpumask);
  1081. /* Sanity check boundaries */
  1082. BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
  1083. BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
  1084. pgdat->first_deferred_pfn = ULONG_MAX;
  1085. /* Only the highest zone is deferred so find it */
  1086. for (zid = 0; zid < MAX_NR_ZONES; zid++) {
  1087. zone = pgdat->node_zones + zid;
  1088. if (first_init_pfn < zone_end_pfn(zone))
  1089. break;
  1090. }
  1091. for_each_mem_pfn_range(i, nid, &walk_start, &walk_end, NULL) {
  1092. unsigned long pfn, end_pfn;
  1093. struct page *page = NULL;
  1094. struct page *free_base_page = NULL;
  1095. unsigned long free_base_pfn = 0;
  1096. int nr_to_free = 0;
  1097. end_pfn = min(walk_end, zone_end_pfn(zone));
  1098. pfn = first_init_pfn;
  1099. if (pfn < walk_start)
  1100. pfn = walk_start;
  1101. if (pfn < zone->zone_start_pfn)
  1102. pfn = zone->zone_start_pfn;
  1103. for (; pfn < end_pfn; pfn++) {
  1104. if (!pfn_valid_within(pfn))
  1105. goto free_range;
  1106. /*
  1107. * Ensure pfn_valid is checked every
  1108. * MAX_ORDER_NR_PAGES for memory holes
  1109. */
  1110. if ((pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
  1111. if (!pfn_valid(pfn)) {
  1112. page = NULL;
  1113. goto free_range;
  1114. }
  1115. }
  1116. if (!meminit_pfn_in_nid(pfn, nid, &nid_init_state)) {
  1117. page = NULL;
  1118. goto free_range;
  1119. }
  1120. /* Minimise pfn page lookups and scheduler checks */
  1121. if (page && (pfn & (MAX_ORDER_NR_PAGES - 1)) != 0) {
  1122. page++;
  1123. } else {
  1124. nr_pages += nr_to_free;
  1125. deferred_free_range(free_base_page,
  1126. free_base_pfn, nr_to_free);
  1127. free_base_page = NULL;
  1128. free_base_pfn = nr_to_free = 0;
  1129. page = pfn_to_page(pfn);
  1130. cond_resched();
  1131. }
  1132. if (page->flags) {
  1133. VM_BUG_ON(page_zone(page) != zone);
  1134. goto free_range;
  1135. }
  1136. __init_single_page(page, pfn, zid, nid);
  1137. if (!free_base_page) {
  1138. free_base_page = page;
  1139. free_base_pfn = pfn;
  1140. nr_to_free = 0;
  1141. }
  1142. nr_to_free++;
  1143. /* Where possible, batch up pages for a single free */
  1144. continue;
  1145. free_range:
  1146. /* Free the current block of pages to allocator */
  1147. nr_pages += nr_to_free;
  1148. deferred_free_range(free_base_page, free_base_pfn,
  1149. nr_to_free);
  1150. free_base_page = NULL;
  1151. free_base_pfn = nr_to_free = 0;
  1152. }
  1153. first_init_pfn = max(end_pfn, first_init_pfn);
  1154. }
  1155. /* Sanity check that the next zone really is unpopulated */
  1156. WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
  1157. pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
  1158. jiffies_to_msecs(jiffies - start));
  1159. pgdat_init_report_one_done();
  1160. return 0;
  1161. }
  1162. #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
  1163. void __init page_alloc_init_late(void)
  1164. {
  1165. struct zone *zone;
  1166. #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
  1167. int nid;
  1168. /* There will be num_node_state(N_MEMORY) threads */
  1169. atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
  1170. for_each_node_state(nid, N_MEMORY) {
  1171. kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
  1172. }
  1173. /* Block until all are initialised */
  1174. wait_for_completion(&pgdat_init_all_done_comp);
  1175. /* Reinit limits that are based on free pages after the kernel is up */
  1176. files_maxfiles_init();
  1177. #endif
  1178. for_each_populated_zone(zone)
  1179. set_zone_contiguous(zone);
  1180. }
  1181. #ifdef CONFIG_CMA
  1182. /* Free whole pageblock and set its migration type to MIGRATE_CMA. */
  1183. void __init init_cma_reserved_pageblock(struct page *page)
  1184. {
  1185. unsigned i = pageblock_nr_pages;
  1186. struct page *p = page;
  1187. do {
  1188. __ClearPageReserved(p);
  1189. set_page_count(p, 0);
  1190. } while (++p, --i);
  1191. set_pageblock_migratetype(page, MIGRATE_CMA);
  1192. if (pageblock_order >= MAX_ORDER) {
  1193. i = pageblock_nr_pages;
  1194. p = page;
  1195. do {
  1196. set_page_refcounted(p);
  1197. __free_pages(p, MAX_ORDER - 1);
  1198. p += MAX_ORDER_NR_PAGES;
  1199. } while (i -= MAX_ORDER_NR_PAGES);
  1200. } else {
  1201. set_page_refcounted(page);
  1202. __free_pages(page, pageblock_order);
  1203. }
  1204. adjust_managed_page_count(page, pageblock_nr_pages);
  1205. }
  1206. #endif
  1207. /*
  1208. * The order of subdivision here is critical for the IO subsystem.
  1209. * Please do not alter this order without good reasons and regression
  1210. * testing. Specifically, as large blocks of memory are subdivided,
  1211. * the order in which smaller blocks are delivered depends on the order
  1212. * they're subdivided in this function. This is the primary factor
  1213. * influencing the order in which pages are delivered to the IO
  1214. * subsystem according to empirical testing, and this is also justified
  1215. * by considering the behavior of a buddy system containing a single
  1216. * large block of memory acted on by a series of small allocations.
  1217. * This behavior is a critical factor in sglist merging's success.
  1218. *
  1219. * -- nyc
  1220. */
  1221. static inline void expand(struct zone *zone, struct page *page,
  1222. int low, int high, struct free_area *area,
  1223. int migratetype)
  1224. {
  1225. unsigned long size = 1 << high;
  1226. while (high > low) {
  1227. area--;
  1228. high--;
  1229. size >>= 1;
  1230. VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
  1231. if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
  1232. debug_guardpage_enabled() &&
  1233. high < debug_guardpage_minorder()) {
  1234. /*
  1235. * Mark as guard pages (or page), that will allow to
  1236. * merge back to allocator when buddy will be freed.
  1237. * Corresponding page table entries will not be touched,
  1238. * pages will stay not present in virtual address space
  1239. */
  1240. set_page_guard(zone, &page[size], high, migratetype);
  1241. continue;
  1242. }
  1243. list_add(&page[size].lru, &area->free_list[migratetype]);
  1244. area->nr_free++;
  1245. set_page_order(&page[size], high);
  1246. }
  1247. }
  1248. /*
  1249. * This page is about to be returned from the page allocator
  1250. */
  1251. static inline int check_new_page(struct page *page)
  1252. {
  1253. const char *bad_reason = NULL;
  1254. unsigned long bad_flags = 0;
  1255. if (unlikely(atomic_read(&page->_mapcount) != -1))
  1256. bad_reason = "nonzero mapcount";
  1257. if (unlikely(page->mapping != NULL))
  1258. bad_reason = "non-NULL mapping";
  1259. if (unlikely(page_ref_count(page) != 0))
  1260. bad_reason = "nonzero _count";
  1261. if (unlikely(page->flags & __PG_HWPOISON)) {
  1262. bad_reason = "HWPoisoned (hardware-corrupted)";
  1263. bad_flags = __PG_HWPOISON;
  1264. }
  1265. if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
  1266. bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
  1267. bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
  1268. }
  1269. #ifdef CONFIG_MEMCG
  1270. if (unlikely(page->mem_cgroup))
  1271. bad_reason = "page still charged to cgroup";
  1272. #endif
  1273. if (unlikely(bad_reason)) {
  1274. bad_page(page, bad_reason, bad_flags);
  1275. return 1;
  1276. }
  1277. return 0;
  1278. }
  1279. static inline bool free_pages_prezeroed(bool poisoned)
  1280. {
  1281. return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
  1282. page_poisoning_enabled() && poisoned;
  1283. }
  1284. static int prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
  1285. int alloc_flags)
  1286. {
  1287. int i;
  1288. bool poisoned = true;
  1289. for (i = 0; i < (1 << order); i++) {
  1290. struct page *p = page + i;
  1291. if (unlikely(check_new_page(p)))
  1292. return 1;
  1293. if (poisoned)
  1294. poisoned &= page_is_poisoned(p);
  1295. }
  1296. set_page_private(page, 0);
  1297. set_page_refcounted(page);
  1298. arch_alloc_page(page, order);
  1299. kernel_map_pages(page, 1 << order, 1);
  1300. kernel_poison_pages(page, 1 << order, 1);
  1301. kasan_alloc_pages(page, order);
  1302. if (!free_pages_prezeroed(poisoned) && (gfp_flags & __GFP_ZERO))
  1303. for (i = 0; i < (1 << order); i++)
  1304. clear_highpage(page + i);
  1305. if (order && (gfp_flags & __GFP_COMP))
  1306. prep_compound_page(page, order);
  1307. set_page_owner(page, order, gfp_flags);
  1308. /*
  1309. * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
  1310. * allocate the page. The expectation is that the caller is taking
  1311. * steps that will free more memory. The caller should avoid the page
  1312. * being used for !PFMEMALLOC purposes.
  1313. */
  1314. if (alloc_flags & ALLOC_NO_WATERMARKS)
  1315. set_page_pfmemalloc(page);
  1316. else
  1317. clear_page_pfmemalloc(page);
  1318. return 0;
  1319. }
  1320. /*
  1321. * Go through the free lists for the given migratetype and remove
  1322. * the smallest available page from the freelists
  1323. */
  1324. static inline
  1325. struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  1326. int migratetype)
  1327. {
  1328. unsigned int current_order;
  1329. struct free_area *area;
  1330. struct page *page;
  1331. /* Find a page of the appropriate size in the preferred list */
  1332. for (current_order = order; current_order < MAX_ORDER; ++current_order) {
  1333. area = &(zone->free_area[current_order]);
  1334. page = list_first_entry_or_null(&area->free_list[migratetype],
  1335. struct page, lru);
  1336. if (!page)
  1337. continue;
  1338. list_del(&page->lru);
  1339. rmv_page_order(page);
  1340. area->nr_free--;
  1341. expand(zone, page, order, current_order, area, migratetype);
  1342. set_pcppage_migratetype(page, migratetype);
  1343. return page;
  1344. }
  1345. return NULL;
  1346. }
  1347. /*
  1348. * This array describes the order lists are fallen back to when
  1349. * the free lists for the desirable migrate type are depleted
  1350. */
  1351. static int fallbacks[MIGRATE_TYPES][4] = {
  1352. [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
  1353. [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_TYPES },
  1354. [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
  1355. #ifdef CONFIG_CMA
  1356. [MIGRATE_CMA] = { MIGRATE_TYPES }, /* Never used */
  1357. #endif
  1358. #ifdef CONFIG_MEMORY_ISOLATION
  1359. [MIGRATE_ISOLATE] = { MIGRATE_TYPES }, /* Never used */
  1360. #endif
  1361. };
  1362. #ifdef CONFIG_CMA
  1363. static struct page *__rmqueue_cma_fallback(struct zone *zone,
  1364. unsigned int order)
  1365. {
  1366. return __rmqueue_smallest(zone, order, MIGRATE_CMA);
  1367. }
  1368. #else
  1369. static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
  1370. unsigned int order) { return NULL; }
  1371. #endif
  1372. /*
  1373. * Move the free pages in a range to the free lists of the requested type.
  1374. * Note that start_page and end_pages are not aligned on a pageblock
  1375. * boundary. If alignment is required, use move_freepages_block()
  1376. */
  1377. int move_freepages(struct zone *zone,
  1378. struct page *start_page, struct page *end_page,
  1379. int migratetype)
  1380. {
  1381. struct page *page;
  1382. unsigned int order;
  1383. int pages_moved = 0;
  1384. #ifndef CONFIG_HOLES_IN_ZONE
  1385. /*
  1386. * page_zone is not safe to call in this context when
  1387. * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
  1388. * anyway as we check zone boundaries in move_freepages_block().
  1389. * Remove at a later date when no bug reports exist related to
  1390. * grouping pages by mobility
  1391. */
  1392. VM_BUG_ON(page_zone(start_page) != page_zone(end_page));
  1393. #endif
  1394. for (page = start_page; page <= end_page;) {
  1395. /* Make sure we are not inadvertently changing nodes */
  1396. VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
  1397. if (!pfn_valid_within(page_to_pfn(page))) {
  1398. page++;
  1399. continue;
  1400. }
  1401. if (!PageBuddy(page)) {
  1402. page++;
  1403. continue;
  1404. }
  1405. order = page_order(page);
  1406. list_move(&page->lru,
  1407. &zone->free_area[order].free_list[migratetype]);
  1408. page += 1 << order;
  1409. pages_moved += 1 << order;
  1410. }
  1411. return pages_moved;
  1412. }
  1413. int move_freepages_block(struct zone *zone, struct page *page,
  1414. int migratetype)
  1415. {
  1416. unsigned long start_pfn, end_pfn;
  1417. struct page *start_page, *end_page;
  1418. start_pfn = page_to_pfn(page);
  1419. start_pfn = start_pfn & ~(pageblock_nr_pages-1);
  1420. start_page = pfn_to_page(start_pfn);
  1421. end_page = start_page + pageblock_nr_pages - 1;
  1422. end_pfn = start_pfn + pageblock_nr_pages - 1;
  1423. /* Do not cross zone boundaries */
  1424. if (!zone_spans_pfn(zone, start_pfn))
  1425. start_page = page;
  1426. if (!zone_spans_pfn(zone, end_pfn))
  1427. return 0;
  1428. return move_freepages(zone, start_page, end_page, migratetype);
  1429. }
  1430. static void change_pageblock_range(struct page *pageblock_page,
  1431. int start_order, int migratetype)
  1432. {
  1433. int nr_pageblocks = 1 << (start_order - pageblock_order);
  1434. while (nr_pageblocks--) {
  1435. set_pageblock_migratetype(pageblock_page, migratetype);
  1436. pageblock_page += pageblock_nr_pages;
  1437. }
  1438. }
  1439. /*
  1440. * When we are falling back to another migratetype during allocation, try to
  1441. * steal extra free pages from the same pageblocks to satisfy further
  1442. * allocations, instead of polluting multiple pageblocks.
  1443. *
  1444. * If we are stealing a relatively large buddy page, it is likely there will
  1445. * be more free pages in the pageblock, so try to steal them all. For
  1446. * reclaimable and unmovable allocations, we steal regardless of page size,
  1447. * as fragmentation caused by those allocations polluting movable pageblocks
  1448. * is worse than movable allocations stealing from unmovable and reclaimable
  1449. * pageblocks.
  1450. */
  1451. static bool can_steal_fallback(unsigned int order, int start_mt)
  1452. {
  1453. /*
  1454. * Leaving this order check is intended, although there is
  1455. * relaxed order check in next check. The reason is that
  1456. * we can actually steal whole pageblock if this condition met,
  1457. * but, below check doesn't guarantee it and that is just heuristic
  1458. * so could be changed anytime.
  1459. */
  1460. if (order >= pageblock_order)
  1461. return true;
  1462. if (order >= pageblock_order / 2 ||
  1463. start_mt == MIGRATE_RECLAIMABLE ||
  1464. start_mt == MIGRATE_UNMOVABLE ||
  1465. page_group_by_mobility_disabled)
  1466. return true;
  1467. return false;
  1468. }
  1469. /*
  1470. * This function implements actual steal behaviour. If order is large enough,
  1471. * we can steal whole pageblock. If not, we first move freepages in this
  1472. * pageblock and check whether half of pages are moved or not. If half of
  1473. * pages are moved, we can change migratetype of pageblock and permanently
  1474. * use it's pages as requested migratetype in the future.
  1475. */
  1476. static void steal_suitable_fallback(struct zone *zone, struct page *page,
  1477. int start_type)
  1478. {
  1479. unsigned int current_order = page_order(page);
  1480. int pages;
  1481. /* Take ownership for orders >= pageblock_order */
  1482. if (current_order >= pageblock_order) {
  1483. change_pageblock_range(page, current_order, start_type);
  1484. return;
  1485. }
  1486. pages = move_freepages_block(zone, page, start_type);
  1487. /* Claim the whole block if over half of it is free */
  1488. if (pages >= (1 << (pageblock_order-1)) ||
  1489. page_group_by_mobility_disabled)
  1490. set_pageblock_migratetype(page, start_type);
  1491. }
  1492. /*
  1493. * Check whether there is a suitable fallback freepage with requested order.
  1494. * If only_stealable is true, this function returns fallback_mt only if
  1495. * we can steal other freepages all together. This would help to reduce
  1496. * fragmentation due to mixed migratetype pages in one pageblock.
  1497. */
  1498. int find_suitable_fallback(struct free_area *area, unsigned int order,
  1499. int migratetype, bool only_stealable, bool *can_steal)
  1500. {
  1501. int i;
  1502. int fallback_mt;
  1503. if (area->nr_free == 0)
  1504. return -1;
  1505. *can_steal = false;
  1506. for (i = 0;; i++) {
  1507. fallback_mt = fallbacks[migratetype][i];
  1508. if (fallback_mt == MIGRATE_TYPES)
  1509. break;
  1510. if (list_empty(&area->free_list[fallback_mt]))
  1511. continue;
  1512. if (can_steal_fallback(order, migratetype))
  1513. *can_steal = true;
  1514. if (!only_stealable)
  1515. return fallback_mt;
  1516. if (*can_steal)
  1517. return fallback_mt;
  1518. }
  1519. return -1;
  1520. }
  1521. /*
  1522. * Reserve a pageblock for exclusive use of high-order atomic allocations if
  1523. * there are no empty page blocks that contain a page with a suitable order
  1524. */
  1525. static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
  1526. unsigned int alloc_order)
  1527. {
  1528. int mt;
  1529. unsigned long max_managed, flags;
  1530. /*
  1531. * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
  1532. * Check is race-prone but harmless.
  1533. */
  1534. max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
  1535. if (zone->nr_reserved_highatomic >= max_managed)
  1536. return;
  1537. spin_lock_irqsave(&zone->lock, flags);
  1538. /* Recheck the nr_reserved_highatomic limit under the lock */
  1539. if (zone->nr_reserved_highatomic >= max_managed)
  1540. goto out_unlock;
  1541. /* Yoink! */
  1542. mt = get_pageblock_migratetype(page);
  1543. if (mt != MIGRATE_HIGHATOMIC &&
  1544. !is_migrate_isolate(mt) && !is_migrate_cma(mt)) {
  1545. zone->nr_reserved_highatomic += pageblock_nr_pages;
  1546. set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
  1547. move_freepages_block(zone, page, MIGRATE_HIGHATOMIC);
  1548. }
  1549. out_unlock:
  1550. spin_unlock_irqrestore(&zone->lock, flags);
  1551. }
  1552. /*
  1553. * Used when an allocation is about to fail under memory pressure. This
  1554. * potentially hurts the reliability of high-order allocations when under
  1555. * intense memory pressure but failed atomic allocations should be easier
  1556. * to recover from than an OOM.
  1557. */
  1558. static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
  1559. {
  1560. struct zonelist *zonelist = ac->zonelist;
  1561. unsigned long flags;
  1562. struct zoneref *z;
  1563. struct zone *zone;
  1564. struct page *page;
  1565. int order;
  1566. for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
  1567. ac->nodemask) {
  1568. /* Preserve at least one pageblock */
  1569. if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
  1570. continue;
  1571. spin_lock_irqsave(&zone->lock, flags);
  1572. for (order = 0; order < MAX_ORDER; order++) {
  1573. struct free_area *area = &(zone->free_area[order]);
  1574. page = list_first_entry_or_null(
  1575. &area->free_list[MIGRATE_HIGHATOMIC],
  1576. struct page, lru);
  1577. if (!page)
  1578. continue;
  1579. /*
  1580. * It should never happen but changes to locking could
  1581. * inadvertently allow a per-cpu drain to add pages
  1582. * to MIGRATE_HIGHATOMIC while unreserving so be safe
  1583. * and watch for underflows.
  1584. */
  1585. zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
  1586. zone->nr_reserved_highatomic);
  1587. /*
  1588. * Convert to ac->migratetype and avoid the normal
  1589. * pageblock stealing heuristics. Minimally, the caller
  1590. * is doing the work and needs the pages. More
  1591. * importantly, if the block was always converted to
  1592. * MIGRATE_UNMOVABLE or another type then the number
  1593. * of pageblocks that cannot be completely freed
  1594. * may increase.
  1595. */
  1596. set_pageblock_migratetype(page, ac->migratetype);
  1597. move_freepages_block(zone, page, ac->migratetype);
  1598. spin_unlock_irqrestore(&zone->lock, flags);
  1599. return;
  1600. }
  1601. spin_unlock_irqrestore(&zone->lock, flags);
  1602. }
  1603. }
  1604. /* Remove an element from the buddy allocator from the fallback list */
  1605. static inline struct page *
  1606. __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
  1607. {
  1608. struct free_area *area;
  1609. unsigned int current_order;
  1610. struct page *page;
  1611. int fallback_mt;
  1612. bool can_steal;
  1613. /* Find the largest possible block of pages in the other list */
  1614. for (current_order = MAX_ORDER-1;
  1615. current_order >= order && current_order <= MAX_ORDER-1;
  1616. --current_order) {
  1617. area = &(zone->free_area[current_order]);
  1618. fallback_mt = find_suitable_fallback(area, current_order,
  1619. start_migratetype, false, &can_steal);
  1620. if (fallback_mt == -1)
  1621. continue;
  1622. page = list_first_entry(&area->free_list[fallback_mt],
  1623. struct page, lru);
  1624. if (can_steal)
  1625. steal_suitable_fallback(zone, page, start_migratetype);
  1626. /* Remove the page from the freelists */
  1627. area->nr_free--;
  1628. list_del(&page->lru);
  1629. rmv_page_order(page);
  1630. expand(zone, page, order, current_order, area,
  1631. start_migratetype);
  1632. /*
  1633. * The pcppage_migratetype may differ from pageblock's
  1634. * migratetype depending on the decisions in
  1635. * find_suitable_fallback(). This is OK as long as it does not
  1636. * differ for MIGRATE_CMA pageblocks. Those can be used as
  1637. * fallback only via special __rmqueue_cma_fallback() function
  1638. */
  1639. set_pcppage_migratetype(page, start_migratetype);
  1640. trace_mm_page_alloc_extfrag(page, order, current_order,
  1641. start_migratetype, fallback_mt);
  1642. return page;
  1643. }
  1644. return NULL;
  1645. }
  1646. /*
  1647. * Do the hard work of removing an element from the buddy allocator.
  1648. * Call me with the zone->lock already held.
  1649. */
  1650. static struct page *__rmqueue(struct zone *zone, unsigned int order,
  1651. int migratetype)
  1652. {
  1653. struct page *page;
  1654. page = __rmqueue_smallest(zone, order, migratetype);
  1655. if (unlikely(!page)) {
  1656. if (migratetype == MIGRATE_MOVABLE)
  1657. page = __rmqueue_cma_fallback(zone, order);
  1658. if (!page)
  1659. page = __rmqueue_fallback(zone, order, migratetype);
  1660. }
  1661. trace_mm_page_alloc_zone_locked(page, order, migratetype);
  1662. return page;
  1663. }
  1664. /*
  1665. * Obtain a specified number of elements from the buddy allocator, all under
  1666. * a single hold of the lock, for efficiency. Add them to the supplied list.
  1667. * Returns the number of new pages which were placed at *list.
  1668. */
  1669. static int rmqueue_bulk(struct zone *zone, unsigned int order,
  1670. unsigned long count, struct list_head *list,
  1671. int migratetype, bool cold)
  1672. {
  1673. int i;
  1674. spin_lock(&zone->lock);
  1675. for (i = 0; i < count; ++i) {
  1676. struct page *page = __rmqueue(zone, order, migratetype);
  1677. if (unlikely(page == NULL))
  1678. break;
  1679. /*
  1680. * Split buddy pages returned by expand() are received here
  1681. * in physical page order. The page is added to the callers and
  1682. * list and the list head then moves forward. From the callers
  1683. * perspective, the linked list is ordered by page number in
  1684. * some conditions. This is useful for IO devices that can
  1685. * merge IO requests if the physical pages are ordered
  1686. * properly.
  1687. */
  1688. if (likely(!cold))
  1689. list_add(&page->lru, list);
  1690. else
  1691. list_add_tail(&page->lru, list);
  1692. list = &page->lru;
  1693. if (is_migrate_cma(get_pcppage_migratetype(page)))
  1694. __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
  1695. -(1 << order));
  1696. }
  1697. __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
  1698. spin_unlock(&zone->lock);
  1699. return i;
  1700. }
  1701. #ifdef CONFIG_NUMA
  1702. /*
  1703. * Called from the vmstat counter updater to drain pagesets of this
  1704. * currently executing processor on remote nodes after they have
  1705. * expired.
  1706. *
  1707. * Note that this function must be called with the thread pinned to
  1708. * a single processor.
  1709. */
  1710. void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
  1711. {
  1712. unsigned long flags;
  1713. int to_drain, batch;
  1714. local_irq_save(flags);
  1715. batch = READ_ONCE(pcp->batch);
  1716. to_drain = min(pcp->count, batch);
  1717. if (to_drain > 0) {
  1718. free_pcppages_bulk(zone, to_drain, pcp);
  1719. pcp->count -= to_drain;
  1720. }
  1721. local_irq_restore(flags);
  1722. }
  1723. #endif
  1724. /*
  1725. * Drain pcplists of the indicated processor and zone.
  1726. *
  1727. * The processor must either be the current processor and the
  1728. * thread pinned to the current processor or a processor that
  1729. * is not online.
  1730. */
  1731. static void drain_pages_zone(unsigned int cpu, struct zone *zone)
  1732. {
  1733. unsigned long flags;
  1734. struct per_cpu_pageset *pset;
  1735. struct per_cpu_pages *pcp;
  1736. local_irq_save(flags);
  1737. pset = per_cpu_ptr(zone->pageset, cpu);
  1738. pcp = &pset->pcp;
  1739. if (pcp->count) {
  1740. free_pcppages_bulk(zone, pcp->count, pcp);
  1741. pcp->count = 0;
  1742. }
  1743. local_irq_restore(flags);
  1744. }
  1745. /*
  1746. * Drain pcplists of all zones on the indicated processor.
  1747. *
  1748. * The processor must either be the current processor and the
  1749. * thread pinned to the current processor or a processor that
  1750. * is not online.
  1751. */
  1752. static void drain_pages(unsigned int cpu)
  1753. {
  1754. struct zone *zone;
  1755. for_each_populated_zone(zone) {
  1756. drain_pages_zone(cpu, zone);
  1757. }
  1758. }
  1759. /*
  1760. * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  1761. *
  1762. * The CPU has to be pinned. When zone parameter is non-NULL, spill just
  1763. * the single zone's pages.
  1764. */
  1765. void drain_local_pages(struct zone *zone)
  1766. {
  1767. int cpu = smp_processor_id();
  1768. if (zone)
  1769. drain_pages_zone(cpu, zone);
  1770. else
  1771. drain_pages(cpu);
  1772. }
  1773. /*
  1774. * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
  1775. *
  1776. * When zone parameter is non-NULL, spill just the single zone's pages.
  1777. *
  1778. * Note that this code is protected against sending an IPI to an offline
  1779. * CPU but does not guarantee sending an IPI to newly hotplugged CPUs:
  1780. * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but
  1781. * nothing keeps CPUs from showing up after we populated the cpumask and
  1782. * before the call to on_each_cpu_mask().
  1783. */
  1784. void drain_all_pages(struct zone *zone)
  1785. {
  1786. int cpu;
  1787. /*
  1788. * Allocate in the BSS so we wont require allocation in
  1789. * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
  1790. */
  1791. static cpumask_t cpus_with_pcps;
  1792. /*
  1793. * We don't care about racing with CPU hotplug event
  1794. * as offline notification will cause the notified
  1795. * cpu to drain that CPU pcps and on_each_cpu_mask
  1796. * disables preemption as part of its processing
  1797. */
  1798. for_each_online_cpu(cpu) {
  1799. struct per_cpu_pageset *pcp;
  1800. struct zone *z;
  1801. bool has_pcps = false;
  1802. if (zone) {
  1803. pcp = per_cpu_ptr(zone->pageset, cpu);
  1804. if (pcp->pcp.count)
  1805. has_pcps = true;
  1806. } else {
  1807. for_each_populated_zone(z) {
  1808. pcp = per_cpu_ptr(z->pageset, cpu);
  1809. if (pcp->pcp.count) {
  1810. has_pcps = true;
  1811. break;
  1812. }
  1813. }
  1814. }
  1815. if (has_pcps)
  1816. cpumask_set_cpu(cpu, &cpus_with_pcps);
  1817. else
  1818. cpumask_clear_cpu(cpu, &cpus_with_pcps);
  1819. }
  1820. on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
  1821. zone, 1);
  1822. }
  1823. #ifdef CONFIG_HIBERNATION
  1824. void mark_free_pages(struct zone *zone)
  1825. {
  1826. unsigned long pfn, max_zone_pfn;
  1827. unsigned long flags;
  1828. unsigned int order, t;
  1829. struct page *page;
  1830. if (zone_is_empty(zone))
  1831. return;
  1832. spin_lock_irqsave(&zone->lock, flags);
  1833. max_zone_pfn = zone_end_pfn(zone);
  1834. for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
  1835. if (pfn_valid(pfn)) {
  1836. page = pfn_to_page(pfn);
  1837. if (!swsusp_page_is_forbidden(page))
  1838. swsusp_unset_page_free(page);
  1839. }
  1840. for_each_migratetype_order(order, t) {
  1841. list_for_each_entry(page,
  1842. &zone->free_area[order].free_list[t], lru) {
  1843. unsigned long i;
  1844. pfn = page_to_pfn(page);
  1845. for (i = 0; i < (1UL << order); i++)
  1846. swsusp_set_page_free(pfn_to_page(pfn + i));
  1847. }
  1848. }
  1849. spin_unlock_irqrestore(&zone->lock, flags);
  1850. }
  1851. #endif /* CONFIG_PM */
  1852. /*
  1853. * Free a 0-order page
  1854. * cold == true ? free a cold page : free a hot page
  1855. */
  1856. void free_hot_cold_page(struct page *page, bool cold)
  1857. {
  1858. struct zone *zone = page_zone(page);
  1859. struct per_cpu_pages *pcp;
  1860. unsigned long flags;
  1861. unsigned long pfn = page_to_pfn(page);
  1862. int migratetype;
  1863. if (!free_pages_prepare(page, 0))
  1864. return;
  1865. migratetype = get_pfnblock_migratetype(page, pfn);
  1866. set_pcppage_migratetype(page, migratetype);
  1867. local_irq_save(flags);
  1868. __count_vm_event(PGFREE);
  1869. /*
  1870. * We only track unmovable, reclaimable and movable on pcp lists.
  1871. * Free ISOLATE pages back to the allocator because they are being
  1872. * offlined but treat RESERVE as movable pages so we can get those
  1873. * areas back if necessary. Otherwise, we may have to free
  1874. * excessively into the page allocator
  1875. */
  1876. if (migratetype >= MIGRATE_PCPTYPES) {
  1877. if (unlikely(is_migrate_isolate(migratetype))) {
  1878. free_one_page(zone, page, pfn, 0, migratetype);
  1879. goto out;
  1880. }
  1881. migratetype = MIGRATE_MOVABLE;
  1882. }
  1883. pcp = &this_cpu_ptr(zone->pageset)->pcp;
  1884. if (!cold)
  1885. list_add(&page->lru, &pcp->lists[migratetype]);
  1886. else
  1887. list_add_tail(&page->lru, &pcp->lists[migratetype]);
  1888. pcp->count++;
  1889. if (pcp->count >= pcp->high) {
  1890. unsigned long batch = READ_ONCE(pcp->batch);
  1891. free_pcppages_bulk(zone, batch, pcp);
  1892. pcp->count -= batch;
  1893. }
  1894. out:
  1895. local_irq_restore(flags);
  1896. }
  1897. /*
  1898. * Free a list of 0-order pages
  1899. */
  1900. void free_hot_cold_page_list(struct list_head *list, bool cold)
  1901. {
  1902. struct page *page, *next;
  1903. list_for_each_entry_safe(page, next, list, lru) {
  1904. trace_mm_page_free_batched(page, cold);
  1905. free_hot_cold_page(page, cold);
  1906. }
  1907. }
  1908. /*
  1909. * split_page takes a non-compound higher-order page, and splits it into
  1910. * n (1<<order) sub-pages: page[0..n]
  1911. * Each sub-page must be freed individually.
  1912. *
  1913. * Note: this is probably too low level an operation for use in drivers.
  1914. * Please consult with lkml before using this in your driver.
  1915. */
  1916. void split_page(struct page *page, unsigned int order)
  1917. {
  1918. int i;
  1919. gfp_t gfp_mask;
  1920. VM_BUG_ON_PAGE(PageCompound(page), page);
  1921. VM_BUG_ON_PAGE(!page_count(page), page);
  1922. #ifdef CONFIG_KMEMCHECK
  1923. /*
  1924. * Split shadow pages too, because free(page[0]) would
  1925. * otherwise free the whole shadow.
  1926. */
  1927. if (kmemcheck_page_is_tracked(page))
  1928. split_page(virt_to_page(page[0].shadow), order);
  1929. #endif
  1930. gfp_mask = get_page_owner_gfp(page);
  1931. set_page_owner(page, 0, gfp_mask);
  1932. for (i = 1; i < (1 << order); i++) {
  1933. set_page_refcounted(page + i);
  1934. set_page_owner(page + i, 0, gfp_mask);
  1935. }
  1936. }
  1937. EXPORT_SYMBOL_GPL(split_page);
  1938. int __isolate_free_page(struct page *page, unsigned int order)
  1939. {
  1940. unsigned long watermark;
  1941. struct zone *zone;
  1942. int mt;
  1943. BUG_ON(!PageBuddy(page));
  1944. zone = page_zone(page);
  1945. mt = get_pageblock_migratetype(page);
  1946. if (!is_migrate_isolate(mt)) {
  1947. /* Obey watermarks as if the page was being allocated */
  1948. watermark = low_wmark_pages(zone) + (1 << order);
  1949. if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
  1950. return 0;
  1951. __mod_zone_freepage_state(zone, -(1UL << order), mt);
  1952. }
  1953. /* Remove page from free list */
  1954. list_del(&page->lru);
  1955. zone->free_area[order].nr_free--;
  1956. rmv_page_order(page);
  1957. set_page_owner(page, order, __GFP_MOVABLE);
  1958. /* Set the pageblock if the isolated page is at least a pageblock */
  1959. if (order >= pageblock_order - 1) {
  1960. struct page *endpage = page + (1 << order) - 1;
  1961. for (; page < endpage; page += pageblock_nr_pages) {
  1962. int mt = get_pageblock_migratetype(page);
  1963. if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
  1964. set_pageblock_migratetype(page,
  1965. MIGRATE_MOVABLE);
  1966. }
  1967. }
  1968. return 1UL << order;
  1969. }
  1970. /*
  1971. * Similar to split_page except the page is already free. As this is only
  1972. * being used for migration, the migratetype of the block also changes.
  1973. * As this is called with interrupts disabled, the caller is responsible
  1974. * for calling arch_alloc_page() and kernel_map_page() after interrupts
  1975. * are enabled.
  1976. *
  1977. * Note: this is probably too low level an operation for use in drivers.
  1978. * Please consult with lkml before using this in your driver.
  1979. */
  1980. int split_free_page(struct page *page)
  1981. {
  1982. unsigned int order;
  1983. int nr_pages;
  1984. order = page_order(page);
  1985. nr_pages = __isolate_free_page(page, order);
  1986. if (!nr_pages)
  1987. return 0;
  1988. /* Split into individual pages */
  1989. set_page_refcounted(page);
  1990. split_page(page, order);
  1991. return nr_pages;
  1992. }
  1993. /*
  1994. * Allocate a page from the given zone. Use pcplists for order-0 allocations.
  1995. */
  1996. static inline
  1997. struct page *buffered_rmqueue(struct zone *preferred_zone,
  1998. struct zone *zone, unsigned int order,
  1999. gfp_t gfp_flags, int alloc_flags, int migratetype)
  2000. {
  2001. unsigned long flags;
  2002. struct page *page;
  2003. bool cold = ((gfp_flags & __GFP_COLD) != 0);
  2004. if (likely(order == 0)) {
  2005. struct per_cpu_pages *pcp;
  2006. struct list_head *list;
  2007. local_irq_save(flags);
  2008. pcp = &this_cpu_ptr(zone->pageset)->pcp;
  2009. list = &pcp->lists[migratetype];
  2010. if (list_empty(list)) {
  2011. pcp->count += rmqueue_bulk(zone, 0,
  2012. pcp->batch, list,
  2013. migratetype, cold);
  2014. if (unlikely(list_empty(list)))
  2015. goto failed;
  2016. }
  2017. if (cold)
  2018. page = list_last_entry(list, struct page, lru);
  2019. else
  2020. page = list_first_entry(list, struct page, lru);
  2021. list_del(&page->lru);
  2022. pcp->count--;
  2023. } else {
  2024. /*
  2025. * We most definitely don't want callers attempting to
  2026. * allocate greater than order-1 page units with __GFP_NOFAIL.
  2027. */
  2028. WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
  2029. spin_lock_irqsave(&zone->lock, flags);
  2030. page = NULL;
  2031. if (alloc_flags & ALLOC_HARDER) {
  2032. page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
  2033. if (page)
  2034. trace_mm_page_alloc_zone_locked(page, order, migratetype);
  2035. }
  2036. if (!page)
  2037. page = __rmqueue(zone, order, migratetype);
  2038. spin_unlock(&zone->lock);
  2039. if (!page)
  2040. goto failed;
  2041. __mod_zone_freepage_state(zone, -(1 << order),
  2042. get_pcppage_migratetype(page));
  2043. }
  2044. __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
  2045. if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 &&
  2046. !test_bit(ZONE_FAIR_DEPLETED, &zone->flags))
  2047. set_bit(ZONE_FAIR_DEPLETED, &zone->flags);
  2048. __count_zone_vm_events(PGALLOC, zone, 1 << order);
  2049. zone_statistics(preferred_zone, zone, gfp_flags);
  2050. local_irq_restore(flags);
  2051. VM_BUG_ON_PAGE(bad_range(zone, page), page);
  2052. return page;
  2053. failed:
  2054. local_irq_restore(flags);
  2055. return NULL;
  2056. }
  2057. #ifdef CONFIG_FAIL_PAGE_ALLOC
  2058. static struct {
  2059. struct fault_attr attr;
  2060. bool ignore_gfp_highmem;
  2061. bool ignore_gfp_reclaim;
  2062. u32 min_order;
  2063. } fail_page_alloc = {
  2064. .attr = FAULT_ATTR_INITIALIZER,
  2065. .ignore_gfp_reclaim = true,
  2066. .ignore_gfp_highmem = true,
  2067. .min_order = 1,
  2068. };
  2069. static int __init setup_fail_page_alloc(char *str)
  2070. {
  2071. return setup_fault_attr(&fail_page_alloc.attr, str);
  2072. }
  2073. __setup("fail_page_alloc=", setup_fail_page_alloc);
  2074. static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  2075. {
  2076. if (order < fail_page_alloc.min_order)
  2077. return false;
  2078. if (gfp_mask & __GFP_NOFAIL)
  2079. return false;
  2080. if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
  2081. return false;
  2082. if (fail_page_alloc.ignore_gfp_reclaim &&
  2083. (gfp_mask & __GFP_DIRECT_RECLAIM))
  2084. return false;
  2085. return should_fail(&fail_page_alloc.attr, 1 << order);
  2086. }
  2087. #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
  2088. static int __init fail_page_alloc_debugfs(void)
  2089. {
  2090. umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
  2091. struct dentry *dir;
  2092. dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
  2093. &fail_page_alloc.attr);
  2094. if (IS_ERR(dir))
  2095. return PTR_ERR(dir);
  2096. if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
  2097. &fail_page_alloc.ignore_gfp_reclaim))
  2098. goto fail;
  2099. if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
  2100. &fail_page_alloc.ignore_gfp_highmem))
  2101. goto fail;
  2102. if (!debugfs_create_u32("min-order", mode, dir,
  2103. &fail_page_alloc.min_order))
  2104. goto fail;
  2105. return 0;
  2106. fail:
  2107. debugfs_remove_recursive(dir);
  2108. return -ENOMEM;
  2109. }
  2110. late_initcall(fail_page_alloc_debugfs);
  2111. #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */
  2112. #else /* CONFIG_FAIL_PAGE_ALLOC */
  2113. static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
  2114. {
  2115. return false;
  2116. }
  2117. #endif /* CONFIG_FAIL_PAGE_ALLOC */
  2118. /*
  2119. * Return true if free base pages are above 'mark'. For high-order checks it
  2120. * will return true of the order-0 watermark is reached and there is at least
  2121. * one free page of a suitable size. Checking now avoids taking the zone lock
  2122. * to check in the allocation paths if no pages are free.
  2123. */
  2124. static bool __zone_watermark_ok(struct zone *z, unsigned int order,
  2125. unsigned long mark, int classzone_idx, int alloc_flags,
  2126. long free_pages)
  2127. {
  2128. long min = mark;
  2129. int o;
  2130. const int alloc_harder = (alloc_flags & ALLOC_HARDER);
  2131. /* free_pages may go negative - that's OK */
  2132. free_pages -= (1 << order) - 1;
  2133. if (alloc_flags & ALLOC_HIGH)
  2134. min -= min / 2;
  2135. /*
  2136. * If the caller does not have rights to ALLOC_HARDER then subtract
  2137. * the high-atomic reserves. This will over-estimate the size of the
  2138. * atomic reserve but it avoids a search.
  2139. */
  2140. if (likely(!alloc_harder))
  2141. free_pages -= z->nr_reserved_highatomic;
  2142. else
  2143. min -= min / 4;
  2144. #ifdef CONFIG_CMA
  2145. /* If allocation can't use CMA areas don't use free CMA pages */
  2146. if (!(alloc_flags & ALLOC_CMA))
  2147. free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
  2148. #endif
  2149. /*
  2150. * Check watermarks for an order-0 allocation request. If these
  2151. * are not met, then a high-order request also cannot go ahead
  2152. * even if a suitable page happened to be free.
  2153. */
  2154. if (free_pages <= min + z->lowmem_reserve[classzone_idx])
  2155. return false;
  2156. /* If this is an order-0 request then the watermark is fine */
  2157. if (!order)
  2158. return true;
  2159. /* For a high-order request, check at least one suitable page is free */
  2160. for (o = order; o < MAX_ORDER; o++) {
  2161. struct free_area *area = &z->free_area[o];
  2162. int mt;
  2163. if (!area->nr_free)
  2164. continue;
  2165. if (alloc_harder)
  2166. return true;
  2167. for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
  2168. if (!list_empty(&area->free_list[mt]))
  2169. return true;
  2170. }
  2171. #ifdef CONFIG_CMA
  2172. if ((alloc_flags & ALLOC_CMA) &&
  2173. !list_empty(&area->free_list[MIGRATE_CMA])) {
  2174. return true;
  2175. }
  2176. #endif
  2177. }
  2178. return false;
  2179. }
  2180. bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
  2181. int classzone_idx, int alloc_flags)
  2182. {
  2183. return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
  2184. zone_page_state(z, NR_FREE_PAGES));
  2185. }
  2186. bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
  2187. unsigned long mark, int classzone_idx)
  2188. {
  2189. long free_pages = zone_page_state(z, NR_FREE_PAGES);
  2190. if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
  2191. free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);
  2192. return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
  2193. free_pages);
  2194. }
  2195. #ifdef CONFIG_NUMA
  2196. static bool zone_local(struct zone *local_zone, struct zone *zone)
  2197. {
  2198. return local_zone->node == zone->node;
  2199. }
  2200. static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  2201. {
  2202. return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <
  2203. RECLAIM_DISTANCE;
  2204. }
  2205. #else /* CONFIG_NUMA */
  2206. static bool zone_local(struct zone *local_zone, struct zone *zone)
  2207. {
  2208. return true;
  2209. }
  2210. static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
  2211. {
  2212. return true;
  2213. }
  2214. #endif /* CONFIG_NUMA */
  2215. static void reset_alloc_batches(struct zone *preferred_zone)
  2216. {
  2217. struct zone *zone = preferred_zone->zone_pgdat->node_zones;
  2218. do {
  2219. mod_zone_page_state(zone, NR_ALLOC_BATCH,
  2220. high_wmark_pages(zone) - low_wmark_pages(zone) -
  2221. atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
  2222. clear_bit(ZONE_FAIR_DEPLETED, &zone->flags);
  2223. } while (zone++ != preferred_zone);
  2224. }
  2225. /*
  2226. * get_page_from_freelist goes through the zonelist trying to allocate
  2227. * a page.
  2228. */
  2229. static struct page *
  2230. get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
  2231. const struct alloc_context *ac)
  2232. {
  2233. struct zonelist *zonelist = ac->zonelist;
  2234. struct zoneref *z;
  2235. struct page *page = NULL;
  2236. struct zone *zone;
  2237. int nr_fair_skipped = 0;
  2238. bool zonelist_rescan;
  2239. zonelist_scan:
  2240. zonelist_rescan = false;
  2241. /*
  2242. * Scan zonelist, looking for a zone with enough free.
  2243. * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
  2244. */
  2245. for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
  2246. ac->nodemask) {
  2247. unsigned long mark;
  2248. if (cpusets_enabled() &&
  2249. (alloc_flags & ALLOC_CPUSET) &&
  2250. !cpuset_zone_allowed(zone, gfp_mask))
  2251. continue;
  2252. /*
  2253. * Distribute pages in proportion to the individual
  2254. * zone size to ensure fair page aging. The zone a
  2255. * page was allocated in should have no effect on the
  2256. * time the page has in memory before being reclaimed.
  2257. */
  2258. if (alloc_flags & ALLOC_FAIR) {
  2259. if (!zone_local(ac->preferred_zone, zone))
  2260. break;
  2261. if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
  2262. nr_fair_skipped++;
  2263. continue;
  2264. }
  2265. }
  2266. /*
  2267. * When allocating a page cache page for writing, we
  2268. * want to get it from a zone that is within its dirty
  2269. * limit, such that no single zone holds more than its
  2270. * proportional share of globally allowed dirty pages.
  2271. * The dirty limits take into account the zone's
  2272. * lowmem reserves and high watermark so that kswapd
  2273. * should be able to balance it without having to
  2274. * write pages from its LRU list.
  2275. *
  2276. * This may look like it could increase pressure on
  2277. * lower zones by failing allocations in higher zones
  2278. * before they are full. But the pages that do spill
  2279. * over are limited as the lower zones are protected
  2280. * by this very same mechanism. It should not become
  2281. * a practical burden to them.
  2282. *
  2283. * XXX: For now, allow allocations to potentially
  2284. * exceed the per-zone dirty limit in the slowpath
  2285. * (spread_dirty_pages unset) before going into reclaim,
  2286. * which is important when on a NUMA setup the allowed
  2287. * zones are together not big enough to reach the
  2288. * global limit. The proper fix for these situations
  2289. * will require awareness of zones in the
  2290. * dirty-throttling and the flusher threads.
  2291. */
  2292. if (ac->spread_dirty_pages && !zone_dirty_ok(zone))
  2293. continue;
  2294. mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
  2295. if (!zone_watermark_ok(zone, order, mark,
  2296. ac->classzone_idx, alloc_flags)) {
  2297. int ret;
  2298. /* Checked here to keep the fast path fast */
  2299. BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
  2300. if (alloc_flags & ALLOC_NO_WATERMARKS)
  2301. goto try_this_zone;
  2302. if (zone_reclaim_mode == 0 ||
  2303. !zone_allows_reclaim(ac->preferred_zone, zone))
  2304. continue;
  2305. ret = zone_reclaim(zone, gfp_mask, order);
  2306. switch (ret) {
  2307. case ZONE_RECLAIM_NOSCAN:
  2308. /* did not scan */
  2309. continue;
  2310. case ZONE_RECLAIM_FULL:
  2311. /* scanned but unreclaimable */
  2312. continue;
  2313. default:
  2314. /* did we reclaim enough */
  2315. if (zone_watermark_ok(zone, order, mark,
  2316. ac->classzone_idx, alloc_flags))
  2317. goto try_this_zone;
  2318. continue;
  2319. }
  2320. }
  2321. try_this_zone:
  2322. page = buffered_rmqueue(ac->preferred_zone, zone, order,
  2323. gfp_mask, alloc_flags, ac->migratetype);
  2324. if (page) {
  2325. if (prep_new_page(page, order, gfp_mask, alloc_flags))
  2326. goto try_this_zone;
  2327. /*
  2328. * If this is a high-order atomic allocation then check
  2329. * if the pageblock should be reserved for the future
  2330. */
  2331. if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
  2332. reserve_highatomic_pageblock(page, zone, order);
  2333. return page;
  2334. }
  2335. }
  2336. /*
  2337. * The first pass makes sure allocations are spread fairly within the
  2338. * local node. However, the local node might have free pages left
  2339. * after the fairness batches are exhausted, and remote zones haven't
  2340. * even been considered yet. Try once more without fairness, and
  2341. * include remote zones now, before entering the slowpath and waking
  2342. * kswapd: prefer spilling to a remote zone over swapping locally.
  2343. */
  2344. if (alloc_flags & ALLOC_FAIR) {
  2345. alloc_flags &= ~ALLOC_FAIR;
  2346. if (nr_fair_skipped) {
  2347. zonelist_rescan = true;
  2348. reset_alloc_batches(ac->preferred_zone);
  2349. }
  2350. if (nr_online_nodes > 1)
  2351. zonelist_rescan = true;
  2352. }
  2353. if (zonelist_rescan)
  2354. goto zonelist_scan;
  2355. return NULL;
  2356. }
  2357. /*
  2358. * Large machines with many possible nodes should not always dump per-node
  2359. * meminfo in irq context.
  2360. */
  2361. static inline bool should_suppress_show_mem(void)
  2362. {
  2363. bool ret = false;
  2364. #if NODES_SHIFT > 8
  2365. ret = in_interrupt();
  2366. #endif
  2367. return ret;
  2368. }
  2369. static DEFINE_RATELIMIT_STATE(nopage_rs,
  2370. DEFAULT_RATELIMIT_INTERVAL,
  2371. DEFAULT_RATELIMIT_BURST);
  2372. void warn_alloc_failed(gfp_t gfp_mask, unsigned int order, const char *fmt, ...)
  2373. {
  2374. unsigned int filter = SHOW_MEM_FILTER_NODES;
  2375. if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs) ||
  2376. debug_guardpage_minorder() > 0)
  2377. return;
  2378. /*
  2379. * This documents exceptions given to allocations in certain
  2380. * contexts that are allowed to allocate outside current's set
  2381. * of allowed nodes.
  2382. */
  2383. if (!(gfp_mask & __GFP_NOMEMALLOC))
  2384. if (test_thread_flag(TIF_MEMDIE) ||
  2385. (current->flags & (PF_MEMALLOC | PF_EXITING)))
  2386. filter &= ~SHOW_MEM_FILTER_NODES;
  2387. if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
  2388. filter &= ~SHOW_MEM_FILTER_NODES;
  2389. if (fmt) {
  2390. struct va_format vaf;
  2391. va_list args;
  2392. va_start(args, fmt);
  2393. vaf.fmt = fmt;
  2394. vaf.va = &args;
  2395. pr_warn("%pV", &vaf);
  2396. va_end(args);
  2397. }
  2398. pr_warn("%s: page allocation failure: order:%u, mode:%#x(%pGg)\n",
  2399. current->comm, order, gfp_mask, &gfp_mask);
  2400. dump_stack();
  2401. if (!should_suppress_show_mem())
  2402. show_mem(filter);
  2403. }
  2404. static inline struct page *
  2405. __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
  2406. const struct alloc_context *ac, unsigned long *did_some_progress)
  2407. {
  2408. struct oom_control oc = {
  2409. .zonelist = ac->zonelist,
  2410. .nodemask = ac->nodemask,
  2411. .gfp_mask = gfp_mask,
  2412. .order = order,
  2413. };
  2414. struct page *page;
  2415. *did_some_progress = 0;
  2416. /*
  2417. * Acquire the oom lock. If that fails, somebody else is
  2418. * making progress for us.
  2419. */
  2420. if (!mutex_trylock(&oom_lock)) {
  2421. *did_some_progress = 1;
  2422. schedule_timeout_uninterruptible(1);
  2423. return NULL;
  2424. }
  2425. /*
  2426. * Go through the zonelist yet one more time, keep very high watermark
  2427. * here, this is only to catch a parallel oom killing, we must fail if
  2428. * we're still under heavy pressure.
  2429. */
  2430. page = get_page_from_freelist(gfp_mask | __GFP_HARDWALL, order,
  2431. ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
  2432. if (page)
  2433. goto out;
  2434. if (!(gfp_mask & __GFP_NOFAIL)) {
  2435. /* Coredumps can quickly deplete all memory reserves */
  2436. if (current->flags & PF_DUMPCORE)
  2437. goto out;
  2438. /* The OOM killer will not help higher order allocs */
  2439. if (order > PAGE_ALLOC_COSTLY_ORDER)
  2440. goto out;
  2441. /* The OOM killer does not needlessly kill tasks for lowmem */
  2442. if (ac->high_zoneidx < ZONE_NORMAL)
  2443. goto out;
  2444. /* The OOM killer does not compensate for IO-less reclaim */
  2445. if (!(gfp_mask & __GFP_FS)) {
  2446. /*
  2447. * XXX: Page reclaim didn't yield anything,
  2448. * and the OOM killer can't be invoked, but
  2449. * keep looping as per tradition.
  2450. */
  2451. *did_some_progress = 1;
  2452. goto out;
  2453. }
  2454. if (pm_suspended_storage())
  2455. goto out;
  2456. /* The OOM killer may not free memory on a specific node */
  2457. if (gfp_mask & __GFP_THISNODE)
  2458. goto out;
  2459. }
  2460. /* Exhausted what can be done so it's blamo time */
  2461. if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
  2462. *did_some_progress = 1;
  2463. if (gfp_mask & __GFP_NOFAIL) {
  2464. page = get_page_from_freelist(gfp_mask, order,
  2465. ALLOC_NO_WATERMARKS|ALLOC_CPUSET, ac);
  2466. /*
  2467. * fallback to ignore cpuset restriction if our nodes
  2468. * are depleted
  2469. */
  2470. if (!page)
  2471. page = get_page_from_freelist(gfp_mask, order,
  2472. ALLOC_NO_WATERMARKS, ac);
  2473. }
  2474. }
  2475. out:
  2476. mutex_unlock(&oom_lock);
  2477. return page;
  2478. }
  2479. #ifdef CONFIG_COMPACTION
  2480. /* Try memory compaction for high-order allocations before reclaim */
  2481. static struct page *
  2482. __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
  2483. int alloc_flags, const struct alloc_context *ac,
  2484. enum migrate_mode mode, int *contended_compaction,
  2485. bool *deferred_compaction)
  2486. {
  2487. unsigned long compact_result;
  2488. struct page *page;
  2489. if (!order)
  2490. return NULL;
  2491. current->flags |= PF_MEMALLOC;
  2492. compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
  2493. mode, contended_compaction);
  2494. current->flags &= ~PF_MEMALLOC;
  2495. switch (compact_result) {
  2496. case COMPACT_DEFERRED:
  2497. *deferred_compaction = true;
  2498. /* fall-through */
  2499. case COMPACT_SKIPPED:
  2500. return NULL;
  2501. default:
  2502. break;
  2503. }
  2504. /*
  2505. * At least in one zone compaction wasn't deferred or skipped, so let's
  2506. * count a compaction stall
  2507. */
  2508. count_vm_event(COMPACTSTALL);
  2509. page = get_page_from_freelist(gfp_mask, order,
  2510. alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
  2511. if (page) {
  2512. struct zone *zone = page_zone(page);
  2513. zone->compact_blockskip_flush = false;
  2514. compaction_defer_reset(zone, order, true);
  2515. count_vm_event(COMPACTSUCCESS);
  2516. return page;
  2517. }
  2518. /*
  2519. * It's bad if compaction run occurs and fails. The most likely reason
  2520. * is that pages exist, but not enough to satisfy watermarks.
  2521. */
  2522. count_vm_event(COMPACTFAIL);
  2523. cond_resched();
  2524. return NULL;
  2525. }
  2526. #else
  2527. static inline struct page *
  2528. __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
  2529. int alloc_flags, const struct alloc_context *ac,
  2530. enum migrate_mode mode, int *contended_compaction,
  2531. bool *deferred_compaction)
  2532. {
  2533. return NULL;
  2534. }
  2535. #endif /* CONFIG_COMPACTION */
  2536. /* Perform direct synchronous page reclaim */
  2537. static int
  2538. __perform_reclaim(gfp_t gfp_mask, unsigned int order,
  2539. const struct alloc_context *ac)
  2540. {
  2541. struct reclaim_state reclaim_state;
  2542. int progress;
  2543. cond_resched();
  2544. /* We now go into synchronous reclaim */
  2545. cpuset_memory_pressure_bump();
  2546. current->flags |= PF_MEMALLOC;
  2547. lockdep_set_current_reclaim_state(gfp_mask);
  2548. reclaim_state.reclaimed_slab = 0;
  2549. current->reclaim_state = &reclaim_state;
  2550. progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
  2551. ac->nodemask);
  2552. current->reclaim_state = NULL;
  2553. lockdep_clear_current_reclaim_state();
  2554. current->flags &= ~PF_MEMALLOC;
  2555. cond_resched();
  2556. return progress;
  2557. }
  2558. /* The really slow allocator path where we enter direct reclaim */
  2559. static inline struct page *
  2560. __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
  2561. int alloc_flags, const struct alloc_context *ac,
  2562. unsigned long *did_some_progress)
  2563. {
  2564. struct page *page = NULL;
  2565. bool drained = false;
  2566. *did_some_progress = __perform_reclaim(gfp_mask, order, ac);
  2567. if (unlikely(!(*did_some_progress)))
  2568. return NULL;
  2569. retry:
  2570. page = get_page_from_freelist(gfp_mask, order,
  2571. alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
  2572. /*
  2573. * If an allocation failed after direct reclaim, it could be because
  2574. * pages are pinned on the per-cpu lists or in high alloc reserves.
  2575. * Shrink them them and try again
  2576. */
  2577. if (!page && !drained) {
  2578. unreserve_highatomic_pageblock(ac);
  2579. drain_all_pages(NULL);
  2580. drained = true;
  2581. goto retry;
  2582. }
  2583. return page;
  2584. }
  2585. static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
  2586. {
  2587. struct zoneref *z;
  2588. struct zone *zone;
  2589. for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
  2590. ac->high_zoneidx, ac->nodemask)
  2591. wakeup_kswapd(zone, order, zone_idx(ac->preferred_zone));
  2592. }
  2593. static inline int
  2594. gfp_to_alloc_flags(gfp_t gfp_mask)
  2595. {
  2596. int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;
  2597. /* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
  2598. BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
  2599. /*
  2600. * The caller may dip into page reserves a bit more if the caller
  2601. * cannot run direct reclaim, or if the caller has realtime scheduling
  2602. * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
  2603. * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
  2604. */
  2605. alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);
  2606. if (gfp_mask & __GFP_ATOMIC) {
  2607. /*
  2608. * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
  2609. * if it can't schedule.
  2610. */
  2611. if (!(gfp_mask & __GFP_NOMEMALLOC))
  2612. alloc_flags |= ALLOC_HARDER;
  2613. /*
  2614. * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
  2615. * comment for __cpuset_node_allowed().
  2616. */
  2617. alloc_flags &= ~ALLOC_CPUSET;
  2618. } else if (unlikely(rt_task(current)) && !in_interrupt())
  2619. alloc_flags |= ALLOC_HARDER;
  2620. if (likely(!(gfp_mask & __GFP_NOMEMALLOC))) {
  2621. if (gfp_mask & __GFP_MEMALLOC)
  2622. alloc_flags |= ALLOC_NO_WATERMARKS;
  2623. else if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
  2624. alloc_flags |= ALLOC_NO_WATERMARKS;
  2625. else if (!in_interrupt() &&
  2626. ((current->flags & PF_MEMALLOC) ||
  2627. unlikely(test_thread_flag(TIF_MEMDIE))))
  2628. alloc_flags |= ALLOC_NO_WATERMARKS;
  2629. }
  2630. #ifdef CONFIG_CMA
  2631. if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
  2632. alloc_flags |= ALLOC_CMA;
  2633. #endif
  2634. return alloc_flags;
  2635. }
  2636. bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
  2637. {
  2638. return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
  2639. }
  2640. static inline bool is_thp_gfp_mask(gfp_t gfp_mask)
  2641. {
  2642. return (gfp_mask & (GFP_TRANSHUGE | __GFP_KSWAPD_RECLAIM)) == GFP_TRANSHUGE;
  2643. }
  2644. static inline struct page *
  2645. __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
  2646. struct alloc_context *ac)
  2647. {
  2648. bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
  2649. struct page *page = NULL;
  2650. int alloc_flags;
  2651. unsigned long pages_reclaimed = 0;
  2652. unsigned long did_some_progress;
  2653. enum migrate_mode migration_mode = MIGRATE_ASYNC;
  2654. bool deferred_compaction = false;
  2655. int contended_compaction = COMPACT_CONTENDED_NONE;
  2656. /*
  2657. * In the slowpath, we sanity check order to avoid ever trying to
  2658. * reclaim >= MAX_ORDER areas which will never succeed. Callers may
  2659. * be using allocators in order of preference for an area that is
  2660. * too large.
  2661. */
  2662. if (order >= MAX_ORDER) {
  2663. WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
  2664. return NULL;
  2665. }
  2666. /*
  2667. * We also sanity check to catch abuse of atomic reserves being used by
  2668. * callers that are not in atomic context.
  2669. */
  2670. if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
  2671. (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
  2672. gfp_mask &= ~__GFP_ATOMIC;
  2673. retry:
  2674. if (gfp_mask & __GFP_KSWAPD_RECLAIM)
  2675. wake_all_kswapds(order, ac);
  2676. /*
  2677. * OK, we're below the kswapd watermark and have kicked background
  2678. * reclaim. Now things get more complex, so set up alloc_flags according
  2679. * to how we want to proceed.
  2680. */
  2681. alloc_flags = gfp_to_alloc_flags(gfp_mask);
  2682. /*
  2683. * Find the true preferred zone if the allocation is unconstrained by
  2684. * cpusets.
  2685. */
  2686. if (!(alloc_flags & ALLOC_CPUSET) && !ac->nodemask) {
  2687. struct zoneref *preferred_zoneref;
  2688. preferred_zoneref = first_zones_zonelist(ac->zonelist,
  2689. ac->high_zoneidx, NULL, &ac->preferred_zone);
  2690. ac->classzone_idx = zonelist_zone_idx(preferred_zoneref);
  2691. }
  2692. /* This is the last chance, in general, before the goto nopage. */
  2693. page = get_page_from_freelist(gfp_mask, order,
  2694. alloc_flags & ~ALLOC_NO_WATERMARKS, ac);
  2695. if (page)
  2696. goto got_pg;
  2697. /* Allocate without watermarks if the context allows */
  2698. if (alloc_flags & ALLOC_NO_WATERMARKS) {
  2699. /*
  2700. * Ignore mempolicies if ALLOC_NO_WATERMARKS on the grounds
  2701. * the allocation is high priority and these type of
  2702. * allocations are system rather than user orientated
  2703. */
  2704. ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
  2705. page = get_page_from_freelist(gfp_mask, order,
  2706. ALLOC_NO_WATERMARKS, ac);
  2707. if (page)
  2708. goto got_pg;
  2709. }
  2710. /* Caller is not willing to reclaim, we can't balance anything */
  2711. if (!can_direct_reclaim) {
  2712. /*
  2713. * All existing users of the __GFP_NOFAIL are blockable, so warn
  2714. * of any new users that actually allow this type of allocation
  2715. * to fail.
  2716. */
  2717. WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL);
  2718. goto nopage;
  2719. }
  2720. /* Avoid recursion of direct reclaim */
  2721. if (current->flags & PF_MEMALLOC) {
  2722. /*
  2723. * __GFP_NOFAIL request from this context is rather bizarre
  2724. * because we cannot reclaim anything and only can loop waiting
  2725. * for somebody to do a work for us.
  2726. */
  2727. if (WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
  2728. cond_resched();
  2729. goto retry;
  2730. }
  2731. goto nopage;
  2732. }
  2733. /* Avoid allocations with no watermarks from looping endlessly */
  2734. if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
  2735. goto nopage;
  2736. /*
  2737. * Try direct compaction. The first pass is asynchronous. Subsequent
  2738. * attempts after direct reclaim are synchronous
  2739. */
  2740. page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
  2741. migration_mode,
  2742. &contended_compaction,
  2743. &deferred_compaction);
  2744. if (page)
  2745. goto got_pg;
  2746. /* Checks for THP-specific high-order allocations */
  2747. if (is_thp_gfp_mask(gfp_mask)) {
  2748. /*
  2749. * If compaction is deferred for high-order allocations, it is
  2750. * because sync compaction recently failed. If this is the case
  2751. * and the caller requested a THP allocation, we do not want
  2752. * to heavily disrupt the system, so we fail the allocation
  2753. * instead of entering direct reclaim.
  2754. */
  2755. if (deferred_compaction)
  2756. goto nopage;
  2757. /*
  2758. * In all zones where compaction was attempted (and not
  2759. * deferred or skipped), lock contention has been detected.
  2760. * For THP allocation we do not want to disrupt the others
  2761. * so we fallback to base pages instead.
  2762. */
  2763. if (contended_compaction == COMPACT_CONTENDED_LOCK)
  2764. goto nopage;
  2765. /*
  2766. * If compaction was aborted due to need_resched(), we do not
  2767. * want to further increase allocation latency, unless it is
  2768. * khugepaged trying to collapse.
  2769. */
  2770. if (contended_compaction == COMPACT_CONTENDED_SCHED
  2771. && !(current->flags & PF_KTHREAD))
  2772. goto nopage;
  2773. }
  2774. /*
  2775. * It can become very expensive to allocate transparent hugepages at
  2776. * fault, so use asynchronous memory compaction for THP unless it is
  2777. * khugepaged trying to collapse.
  2778. */
  2779. if (!is_thp_gfp_mask(gfp_mask) || (current->flags & PF_KTHREAD))
  2780. migration_mode = MIGRATE_SYNC_LIGHT;
  2781. /* Try direct reclaim and then allocating */
  2782. page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
  2783. &did_some_progress);
  2784. if (page)
  2785. goto got_pg;
  2786. /* Do not loop if specifically requested */
  2787. if (gfp_mask & __GFP_NORETRY)
  2788. goto noretry;
  2789. /* Keep reclaiming pages as long as there is reasonable progress */
  2790. pages_reclaimed += did_some_progress;
  2791. if ((did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) ||
  2792. ((gfp_mask & __GFP_REPEAT) && pages_reclaimed < (1 << order))) {
  2793. /* Wait for some write requests to complete then retry */
  2794. wait_iff_congested(ac->preferred_zone, BLK_RW_ASYNC, HZ/50);
  2795. goto retry;
  2796. }
  2797. /* Reclaim has failed us, start killing things */
  2798. page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
  2799. if (page)
  2800. goto got_pg;
  2801. /* Retry as long as the OOM killer is making progress */
  2802. if (did_some_progress)
  2803. goto retry;
  2804. noretry:
  2805. /*
  2806. * High-order allocations do not necessarily loop after
  2807. * direct reclaim and reclaim/compaction depends on compaction
  2808. * being called after reclaim so call directly if necessary
  2809. */
  2810. page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
  2811. ac, migration_mode,
  2812. &contended_compaction,
  2813. &deferred_compaction);
  2814. if (page)
  2815. goto got_pg;
  2816. nopage:
  2817. warn_alloc_failed(gfp_mask, order, NULL);
  2818. got_pg:
  2819. return page;
  2820. }
  2821. /*
  2822. * This is the 'heart' of the zoned buddy allocator.
  2823. */
  2824. struct page *
  2825. __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
  2826. struct zonelist *zonelist, nodemask_t *nodemask)
  2827. {
  2828. struct zoneref *preferred_zoneref;
  2829. struct page *page = NULL;
  2830. unsigned int cpuset_mems_cookie;
  2831. int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
  2832. gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
  2833. struct alloc_context ac = {
  2834. .high_zoneidx = gfp_zone(gfp_mask),
  2835. .nodemask = nodemask,
  2836. .migratetype = gfpflags_to_migratetype(gfp_mask),
  2837. };
  2838. gfp_mask &= gfp_allowed_mask;
  2839. lockdep_trace_alloc(gfp_mask);
  2840. might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);
  2841. if (should_fail_alloc_page(gfp_mask, order))
  2842. return NULL;
  2843. /*
  2844. * Check the zones suitable for the gfp_mask contain at least one
  2845. * valid zone. It's possible to have an empty zonelist as a result
  2846. * of __GFP_THISNODE and a memoryless node
  2847. */
  2848. if (unlikely(!zonelist->_zonerefs->zone))
  2849. return NULL;
  2850. if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
  2851. alloc_flags |= ALLOC_CMA;
  2852. retry_cpuset:
  2853. cpuset_mems_cookie = read_mems_allowed_begin();
  2854. /* We set it here, as __alloc_pages_slowpath might have changed it */
  2855. ac.zonelist = zonelist;
  2856. /* Dirty zone balancing only done in the fast path */
  2857. ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
  2858. /* The preferred zone is used for statistics later */
  2859. preferred_zoneref = first_zones_zonelist(ac.zonelist, ac.high_zoneidx,
  2860. ac.nodemask ? : &cpuset_current_mems_allowed,
  2861. &ac.preferred_zone);
  2862. if (!ac.preferred_zone)
  2863. goto out;
  2864. ac.classzone_idx = zonelist_zone_idx(preferred_zoneref);
  2865. /* First allocation attempt */
  2866. alloc_mask = gfp_mask|__GFP_HARDWALL;
  2867. page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
  2868. if (unlikely(!page)) {
  2869. /*
  2870. * Runtime PM, block IO and its error handling path
  2871. * can deadlock because I/O on the device might not
  2872. * complete.
  2873. */
  2874. alloc_mask = memalloc_noio_flags(gfp_mask);
  2875. ac.spread_dirty_pages = false;
  2876. page = __alloc_pages_slowpath(alloc_mask, order, &ac);
  2877. }
  2878. if (kmemcheck_enabled && page)
  2879. kmemcheck_pagealloc_alloc(page, order, gfp_mask);
  2880. trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);
  2881. out:
  2882. /*
  2883. * When updating a task's mems_allowed, it is possible to race with
  2884. * parallel threads in such a way that an allocation can fail while
  2885. * the mask is being updated. If a page allocation is about to fail,
  2886. * check if the cpuset changed during allocation and if so, retry.
  2887. */
  2888. if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
  2889. goto retry_cpuset;
  2890. return page;
  2891. }
  2892. EXPORT_SYMBOL(__alloc_pages_nodemask);
  2893. /*
  2894. * Common helper functions.
  2895. */
  2896. unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
  2897. {
  2898. struct page *page;
  2899. /*
  2900. * __get_free_pages() returns a 32-bit address, which cannot represent
  2901. * a highmem page
  2902. */
  2903. VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
  2904. page = alloc_pages(gfp_mask, order);
  2905. if (!page)
  2906. return 0;
  2907. return (unsigned long) page_address(page);
  2908. }
  2909. EXPORT_SYMBOL(__get_free_pages);
  2910. unsigned long get_zeroed_page(gfp_t gfp_mask)
  2911. {
  2912. return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
  2913. }
  2914. EXPORT_SYMBOL(get_zeroed_page);
  2915. void __free_pages(struct page *page, unsigned int order)
  2916. {
  2917. if (put_page_testzero(page)) {
  2918. if (order == 0)
  2919. free_hot_cold_page(page, false);
  2920. else
  2921. __free_pages_ok(page, order);
  2922. }
  2923. }
  2924. EXPORT_SYMBOL(__free_pages);
  2925. void free_pages(unsigned long addr, unsigned int order)
  2926. {
  2927. if (addr != 0) {
  2928. VM_BUG_ON(!virt_addr_valid((void *)addr));
  2929. __free_pages(virt_to_page((void *)addr), order);
  2930. }
  2931. }
  2932. EXPORT_SYMBOL(free_pages);
  2933. /*
  2934. * Page Fragment:
  2935. * An arbitrary-length arbitrary-offset area of memory which resides
  2936. * within a 0 or higher order page. Multiple fragments within that page
  2937. * are individually refcounted, in the page's reference counter.
  2938. *
  2939. * The page_frag functions below provide a simple allocation framework for
  2940. * page fragments. This is used by the network stack and network device
  2941. * drivers to provide a backing region of memory for use as either an
  2942. * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
  2943. */
  2944. static struct page *__page_frag_refill(struct page_frag_cache *nc,
  2945. gfp_t gfp_mask)
  2946. {
  2947. struct page *page = NULL;
  2948. gfp_t gfp = gfp_mask;
  2949. #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
  2950. gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
  2951. __GFP_NOMEMALLOC;
  2952. page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
  2953. PAGE_FRAG_CACHE_MAX_ORDER);
  2954. nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
  2955. #endif
  2956. if (unlikely(!page))
  2957. page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
  2958. nc->va = page ? page_address(page) : NULL;
  2959. return page;
  2960. }
  2961. void *__alloc_page_frag(struct page_frag_cache *nc,
  2962. unsigned int fragsz, gfp_t gfp_mask)
  2963. {
  2964. unsigned int size = PAGE_SIZE;
  2965. struct page *page;
  2966. int offset;
  2967. if (unlikely(!nc->va)) {
  2968. refill:
  2969. page = __page_frag_refill(nc, gfp_mask);
  2970. if (!page)
  2971. return NULL;
  2972. #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
  2973. /* if size can vary use size else just use PAGE_SIZE */
  2974. size = nc->size;
  2975. #endif
  2976. /* Even if we own the page, we do not use atomic_set().
  2977. * This would break get_page_unless_zero() users.
  2978. */
  2979. page_ref_add(page, size - 1);
  2980. /* reset page count bias and offset to start of new frag */
  2981. nc->pfmemalloc = page_is_pfmemalloc(page);
  2982. nc->pagecnt_bias = size;
  2983. nc->offset = size;
  2984. }
  2985. offset = nc->offset - fragsz;
  2986. if (unlikely(offset < 0)) {
  2987. page = virt_to_page(nc->va);
  2988. if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
  2989. goto refill;
  2990. #if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
  2991. /* if size can vary use size else just use PAGE_SIZE */
  2992. size = nc->size;
  2993. #endif
  2994. /* OK, page count is 0, we can safely set it */
  2995. set_page_count(page, size);
  2996. /* reset page count bias and offset to start of new frag */
  2997. nc->pagecnt_bias = size;
  2998. offset = size - fragsz;
  2999. }
  3000. nc->pagecnt_bias--;
  3001. nc->offset = offset;
  3002. return nc->va + offset;
  3003. }
  3004. EXPORT_SYMBOL(__alloc_page_frag);
  3005. /*
  3006. * Frees a page fragment allocated out of either a compound or order 0 page.
  3007. */
  3008. void __free_page_frag(void *addr)
  3009. {
  3010. struct page *page = virt_to_head_page(addr);
  3011. if (unlikely(put_page_testzero(page)))
  3012. __free_pages_ok(page, compound_order(page));
  3013. }
  3014. EXPORT_SYMBOL(__free_page_frag);
  3015. /*
  3016. * alloc_kmem_pages charges newly allocated pages to the kmem resource counter
  3017. * of the current memory cgroup if __GFP_ACCOUNT is set, other than that it is
  3018. * equivalent to alloc_pages.
  3019. *
  3020. * It should be used when the caller would like to use kmalloc, but since the
  3021. * allocation is large, it has to fall back to the page allocator.
  3022. */
  3023. struct page *alloc_kmem_pages(gfp_t gfp_mask, unsigned int order)
  3024. {
  3025. struct page *page;
  3026. page = alloc_pages(gfp_mask, order);
  3027. if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
  3028. __free_pages(page, order);
  3029. page = NULL;
  3030. }
  3031. return page;
  3032. }
  3033. struct page *alloc_kmem_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
  3034. {
  3035. struct page *page;
  3036. page = alloc_pages_node(nid, gfp_mask, order);
  3037. if (page && memcg_kmem_charge(page, gfp_mask, order) != 0) {
  3038. __free_pages(page, order);
  3039. page = NULL;
  3040. }
  3041. return page;
  3042. }
  3043. /*
  3044. * __free_kmem_pages and free_kmem_pages will free pages allocated with
  3045. * alloc_kmem_pages.
  3046. */
  3047. void __free_kmem_pages(struct page *page, unsigned int order)
  3048. {
  3049. memcg_kmem_uncharge(page, order);
  3050. __free_pages(page, order);
  3051. }
  3052. void free_kmem_pages(unsigned long addr, unsigned int order)
  3053. {
  3054. if (addr != 0) {
  3055. VM_BUG_ON(!virt_addr_valid((void *)addr));
  3056. __free_kmem_pages(virt_to_page((void *)addr), order);
  3057. }
  3058. }
  3059. static void *make_alloc_exact(unsigned long addr, unsigned int order,
  3060. size_t size)
  3061. {
  3062. if (addr) {
  3063. unsigned long alloc_end = addr + (PAGE_SIZE << order);
  3064. unsigned long used = addr + PAGE_ALIGN(size);
  3065. split_page(virt_to_page((void *)addr), order);
  3066. while (used < alloc_end) {
  3067. free_page(used);
  3068. used += PAGE_SIZE;
  3069. }
  3070. }
  3071. return (void *)addr;
  3072. }
  3073. /**
  3074. * alloc_pages_exact - allocate an exact number physically-contiguous pages.
  3075. * @size: the number of bytes to allocate
  3076. * @gfp_mask: GFP flags for the allocation
  3077. *
  3078. * This function is similar to alloc_pages(), except that it allocates the
  3079. * minimum number of pages to satisfy the request. alloc_pages() can only
  3080. * allocate memory in power-of-two pages.
  3081. *
  3082. * This function is also limited by MAX_ORDER.
  3083. *
  3084. * Memory allocated by this function must be released by free_pages_exact().
  3085. */
  3086. void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
  3087. {
  3088. unsigned int order = get_order(size);
  3089. unsigned long addr;
  3090. addr = __get_free_pages(gfp_mask, order);
  3091. return make_alloc_exact(addr, order, size);
  3092. }
  3093. EXPORT_SYMBOL(alloc_pages_exact);
  3094. /**
  3095. * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
  3096. * pages on a node.
  3097. * @nid: the preferred node ID where memory should be allocated
  3098. * @size: the number of bytes to allocate
  3099. * @gfp_mask: GFP flags for the allocation
  3100. *
  3101. * Like alloc_pages_exact(), but try to allocate on node nid first before falling
  3102. * back.
  3103. */
  3104. void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
  3105. {
  3106. unsigned int order = get_order(size);
  3107. struct page *p = alloc_pages_node(nid, gfp_mask, order);
  3108. if (!p)
  3109. return NULL;
  3110. return make_alloc_exact((unsigned long)page_address(p), order, size);
  3111. }
  3112. /**
  3113. * free_pages_exact - release memory allocated via alloc_pages_exact()
  3114. * @virt: the value returned by alloc_pages_exact.
  3115. * @size: size of allocation, same value as passed to alloc_pages_exact().
  3116. *
  3117. * Release the memory allocated by a previous call to alloc_pages_exact.
  3118. */
  3119. void free_pages_exact(void *virt, size_t size)
  3120. {
  3121. unsigned long addr = (unsigned long)virt;
  3122. unsigned long end = addr + PAGE_ALIGN(size);
  3123. while (addr < end) {
  3124. free_page(addr);
  3125. addr += PAGE_SIZE;
  3126. }
  3127. }
  3128. EXPORT_SYMBOL(free_pages_exact);
  3129. /**
  3130. * nr_free_zone_pages - count number of pages beyond high watermark
  3131. * @offset: The zone index of the highest zone
  3132. *
  3133. * nr_free_zone_pages() counts the number of counts pages which are beyond the
  3134. * high watermark within all zones at or below a given zone index. For each
  3135. * zone, the number of pages is calculated as:
  3136. * managed_pages - high_pages
  3137. */
  3138. static unsigned long nr_free_zone_pages(int offset)
  3139. {
  3140. struct zoneref *z;
  3141. struct zone *zone;
  3142. /* Just pick one node, since fallback list is circular */
  3143. unsigned long sum = 0;
  3144. struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
  3145. for_each_zone_zonelist(zone, z, zonelist, offset) {
  3146. unsigned long size = zone->managed_pages;
  3147. unsigned long high = high_wmark_pages(zone);
  3148. if (size > high)
  3149. sum += size - high;
  3150. }
  3151. return sum;
  3152. }
  3153. /**
  3154. * nr_free_buffer_pages - count number of pages beyond high watermark
  3155. *
  3156. * nr_free_buffer_pages() counts the number of pages which are beyond the high
  3157. * watermark within ZONE_DMA and ZONE_NORMAL.
  3158. */
  3159. unsigned long nr_free_buffer_pages(void)
  3160. {
  3161. return nr_free_zone_pages(gfp_zone(GFP_USER));
  3162. }
  3163. EXPORT_SYMBOL_GPL(nr_free_buffer_pages);
  3164. /**
  3165. * nr_free_pagecache_pages - count number of pages beyond high watermark
  3166. *
  3167. * nr_free_pagecache_pages() counts the number of pages which are beyond the
  3168. * high watermark within all zones.
  3169. */
  3170. unsigned long nr_free_pagecache_pages(void)
  3171. {
  3172. return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
  3173. }
  3174. static inline void show_node(struct zone *zone)
  3175. {
  3176. if (IS_ENABLED(CONFIG_NUMA))
  3177. printk("Node %d ", zone_to_nid(zone));
  3178. }
  3179. long si_mem_available(void)
  3180. {
  3181. long available;
  3182. unsigned long pagecache;
  3183. unsigned long wmark_low = 0;
  3184. unsigned long pages[NR_LRU_LISTS];
  3185. struct zone *zone;
  3186. int lru;
  3187. for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
  3188. pages[lru] = global_page_state(NR_LRU_BASE + lru);
  3189. for_each_zone(zone)
  3190. wmark_low += zone->watermark[WMARK_LOW];
  3191. /*
  3192. * Estimate the amount of memory available for userspace allocations,
  3193. * without causing swapping.
  3194. */
  3195. available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
  3196. /*
  3197. * Not all the page cache can be freed, otherwise the system will
  3198. * start swapping. Assume at least half of the page cache, or the
  3199. * low watermark worth of cache, needs to stay.
  3200. */
  3201. pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
  3202. pagecache -= min(pagecache / 2, wmark_low);
  3203. available += pagecache;
  3204. /*
  3205. * Part of the reclaimable slab consists of items that are in use,
  3206. * and cannot be freed. Cap this estimate at the low watermark.
  3207. */
  3208. available += global_page_state(NR_SLAB_RECLAIMABLE) -
  3209. min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
  3210. if (available < 0)
  3211. available = 0;
  3212. return available;
  3213. }
  3214. EXPORT_SYMBOL_GPL(si_mem_available);
  3215. void si_meminfo(struct sysinfo *val)
  3216. {
  3217. val->totalram = totalram_pages;
  3218. val->sharedram = global_page_state(NR_SHMEM);
  3219. val->freeram = global_page_state(NR_FREE_PAGES);
  3220. val->bufferram = nr_blockdev_pages();
  3221. val->totalhigh = totalhigh_pages;
  3222. val->freehigh = nr_free_highpages();
  3223. val->mem_unit = PAGE_SIZE;
  3224. }
  3225. EXPORT_SYMBOL(si_meminfo);
  3226. #ifdef CONFIG_NUMA
  3227. void si_meminfo_node(struct sysinfo *val, int nid)
  3228. {
  3229. int zone_type; /* needs to be signed */
  3230. unsigned long managed_pages = 0;
  3231. pg_data_t *pgdat = NODE_DATA(nid);
  3232. for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
  3233. managed_pages += pgdat->node_zones[zone_type].managed_pages;
  3234. val->totalram = managed_pages;
  3235. val->sharedram = node_page_state(nid, NR_SHMEM);
  3236. val->freeram = node_page_state(nid, NR_FREE_PAGES);
  3237. #ifdef CONFIG_HIGHMEM
  3238. val->totalhigh = pgdat->node_zones[ZONE_HIGHMEM].managed_pages;
  3239. val->freehigh = zone_page_state(&pgdat->node_zones[ZONE_HIGHMEM],
  3240. NR_FREE_PAGES);
  3241. #else
  3242. val->totalhigh = 0;
  3243. val->freehigh = 0;
  3244. #endif
  3245. val->mem_unit = PAGE_SIZE;
  3246. }
  3247. #endif
  3248. /*
  3249. * Determine whether the node should be displayed or not, depending on whether
  3250. * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
  3251. */
  3252. bool skip_free_areas_node(unsigned int flags, int nid)
  3253. {
  3254. bool ret = false;
  3255. unsigned int cpuset_mems_cookie;
  3256. if (!(flags & SHOW_MEM_FILTER_NODES))
  3257. goto out;
  3258. do {
  3259. cpuset_mems_cookie = read_mems_allowed_begin();
  3260. ret = !node_isset(nid, cpuset_current_mems_allowed);
  3261. } while (read_mems_allowed_retry(cpuset_mems_cookie));
  3262. out:
  3263. return ret;
  3264. }
  3265. #define K(x) ((x) << (PAGE_SHIFT-10))
  3266. static void show_migration_types(unsigned char type)
  3267. {
  3268. static const char types[MIGRATE_TYPES] = {
  3269. [MIGRATE_UNMOVABLE] = 'U',
  3270. [MIGRATE_MOVABLE] = 'M',
  3271. [MIGRATE_RECLAIMABLE] = 'E',
  3272. [MIGRATE_HIGHATOMIC] = 'H',
  3273. #ifdef CONFIG_CMA
  3274. [MIGRATE_CMA] = 'C',
  3275. #endif
  3276. #ifdef CONFIG_MEMORY_ISOLATION
  3277. [MIGRATE_ISOLATE] = 'I',
  3278. #endif
  3279. };
  3280. char tmp[MIGRATE_TYPES + 1];
  3281. char *p = tmp;
  3282. int i;
  3283. for (i = 0; i < MIGRATE_TYPES; i++) {
  3284. if (type & (1 << i))
  3285. *p++ = types[i];
  3286. }
  3287. *p = '\0';
  3288. printk("(%s) ", tmp);
  3289. }
  3290. /*
  3291. * Show free area list (used inside shift_scroll-lock stuff)
  3292. * We also calculate the percentage fragmentation. We do this by counting the
  3293. * memory on each free list with the exception of the first item on the list.
  3294. *
  3295. * Bits in @filter:
  3296. * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
  3297. * cpuset.
  3298. */
  3299. void show_free_areas(unsigned int filter)
  3300. {
  3301. unsigned long free_pcp = 0;
  3302. int cpu;
  3303. struct zone *zone;
  3304. for_each_populated_zone(zone) {
  3305. if (skip_free_areas_node(filter, zone_to_nid(zone)))
  3306. continue;
  3307. for_each_online_cpu(cpu)
  3308. free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
  3309. }
  3310. printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
  3311. " active_file:%lu inactive_file:%lu isolated_file:%lu\n"
  3312. " unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
  3313. " slab_reclaimable:%lu slab_unreclaimable:%lu\n"
  3314. " mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
  3315. " free:%lu free_pcp:%lu free_cma:%lu\n",
  3316. global_page_state(NR_ACTIVE_ANON),
  3317. global_page_state(NR_INACTIVE_ANON),
  3318. global_page_state(NR_ISOLATED_ANON),
  3319. global_page_state(NR_ACTIVE_FILE),
  3320. global_page_state(NR_INACTIVE_FILE),
  3321. global_page_state(NR_ISOLATED_FILE),
  3322. global_page_state(NR_UNEVICTABLE),
  3323. global_page_state(NR_FILE_DIRTY),
  3324. global_page_state(NR_WRITEBACK),
  3325. global_page_state(NR_UNSTABLE_NFS),
  3326. global_page_state(NR_SLAB_RECLAIMABLE),
  3327. global_page_state(NR_SLAB_UNRECLAIMABLE),
  3328. global_page_state(NR_FILE_MAPPED),
  3329. global_page_state(NR_SHMEM),
  3330. global_page_state(NR_PAGETABLE),
  3331. global_page_state(NR_BOUNCE),
  3332. global_page_state(NR_FREE_PAGES),
  3333. free_pcp,
  3334. global_page_state(NR_FREE_CMA_PAGES));
  3335. for_each_populated_zone(zone) {
  3336. int i;
  3337. if (skip_free_areas_node(filter, zone_to_nid(zone)))
  3338. continue;
  3339. free_pcp = 0;
  3340. for_each_online_cpu(cpu)
  3341. free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
  3342. show_node(zone);
  3343. printk("%s"
  3344. " free:%lukB"
  3345. " min:%lukB"
  3346. " low:%lukB"
  3347. " high:%lukB"
  3348. " active_anon:%lukB"
  3349. " inactive_anon:%lukB"
  3350. " active_file:%lukB"
  3351. " inactive_file:%lukB"
  3352. " unevictable:%lukB"
  3353. " isolated(anon):%lukB"
  3354. " isolated(file):%lukB"
  3355. " present:%lukB"
  3356. " managed:%lukB"
  3357. " mlocked:%lukB"
  3358. " dirty:%lukB"
  3359. " writeback:%lukB"
  3360. " mapped:%lukB"
  3361. " shmem:%lukB"
  3362. " slab_reclaimable:%lukB"
  3363. " slab_unreclaimable:%lukB"
  3364. " kernel_stack:%lukB"
  3365. " pagetables:%lukB"
  3366. " unstable:%lukB"
  3367. " bounce:%lukB"
  3368. " free_pcp:%lukB"
  3369. " local_pcp:%ukB"
  3370. " free_cma:%lukB"
  3371. " writeback_tmp:%lukB"
  3372. " pages_scanned:%lu"
  3373. " all_unreclaimable? %s"
  3374. "\n",
  3375. zone->name,
  3376. K(zone_page_state(zone, NR_FREE_PAGES)),
  3377. K(min_wmark_pages(zone)),
  3378. K(low_wmark_pages(zone)),
  3379. K(high_wmark_pages(zone)),
  3380. K(zone_page_state(zone, NR_ACTIVE_ANON)),
  3381. K(zone_page_state(zone, NR_INACTIVE_ANON)),
  3382. K(zone_page_state(zone, NR_ACTIVE_FILE)),
  3383. K(zone_page_state(zone, NR_INACTIVE_FILE)),
  3384. K(zone_page_state(zone, NR_UNEVICTABLE)),
  3385. K(zone_page_state(zone, NR_ISOLATED_ANON)),
  3386. K(zone_page_state(zone, NR_ISOLATED_FILE)),
  3387. K(zone->present_pages),
  3388. K(zone->managed_pages),
  3389. K(zone_page_state(zone, NR_MLOCK)),
  3390. K(zone_page_state(zone, NR_FILE_DIRTY)),
  3391. K(zone_page_state(zone, NR_WRITEBACK)),
  3392. K(zone_page_state(zone, NR_FILE_MAPPED)),
  3393. K(zone_page_state(zone, NR_SHMEM)),
  3394. K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
  3395. K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
  3396. zone_page_state(zone, NR_KERNEL_STACK) *
  3397. THREAD_SIZE / 1024,
  3398. K(zone_page_state(zone, NR_PAGETABLE)),
  3399. K(zone_page_state(zone, NR_UNSTABLE_NFS)),
  3400. K(zone_page_state(zone, NR_BOUNCE)),
  3401. K(free_pcp),
  3402. K(this_cpu_read(zone->pageset->pcp.count)),
  3403. K(zone_page_state(zone, NR_FREE_CMA_PAGES)),
  3404. K(zone_page_state(zone, NR_WRITEBACK_TEMP)),
  3405. K(zone_page_state(zone, NR_PAGES_SCANNED)),
  3406. (!zone_reclaimable(zone) ? "yes" : "no")
  3407. );
  3408. printk("lowmem_reserve[]:");
  3409. for (i = 0; i < MAX_NR_ZONES; i++)
  3410. printk(" %ld", zone->lowmem_reserve[i]);
  3411. printk("\n");
  3412. }
  3413. for_each_populated_zone(zone) {
  3414. unsigned int order;
  3415. unsigned long nr[MAX_ORDER], flags, total = 0;
  3416. unsigned char types[MAX_ORDER];
  3417. if (skip_free_areas_node(filter, zone_to_nid(zone)))
  3418. continue;
  3419. show_node(zone);
  3420. printk("%s: ", zone->name);
  3421. spin_lock_irqsave(&zone->lock, flags);
  3422. for (order = 0; order < MAX_ORDER; order++) {
  3423. struct free_area *area = &zone->free_area[order];
  3424. int type;
  3425. nr[order] = area->nr_free;
  3426. total += nr[order] << order;
  3427. types[order] = 0;
  3428. for (type = 0; type < MIGRATE_TYPES; type++) {
  3429. if (!list_empty(&area->free_list[type]))
  3430. types[order] |= 1 << type;
  3431. }
  3432. }
  3433. spin_unlock_irqrestore(&zone->lock, flags);
  3434. for (order = 0; order < MAX_ORDER; order++) {
  3435. printk("%lu*%lukB ", nr[order], K(1UL) << order);
  3436. if (nr[order])
  3437. show_migration_types(types[order]);
  3438. }
  3439. printk("= %lukB\n", K(total));
  3440. }
  3441. hugetlb_show_meminfo();
  3442. printk("%ld total pagecache pages\n", global_page_state(NR_FILE_PAGES));
  3443. show_swap_cache_info();
  3444. }
  3445. static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
  3446. {
  3447. zoneref->zone = zone;
  3448. zoneref->zone_idx = zone_idx(zone);
  3449. }
  3450. /*
  3451. * Builds allocation fallback zone lists.
  3452. *
  3453. * Add all populated zones of a node to the zonelist.
  3454. */
  3455. static int build_zonelists_node(pg_data_t *pgdat, struct zonelist *zonelist,
  3456. int nr_zones)
  3457. {
  3458. struct zone *zone;
  3459. enum zone_type zone_type = MAX_NR_ZONES;
  3460. do {
  3461. zone_type--;
  3462. zone = pgdat->node_zones + zone_type;
  3463. if (populated_zone(zone)) {
  3464. zoneref_set_zone(zone,
  3465. &zonelist->_zonerefs[nr_zones++]);
  3466. check_highest_zone(zone_type);
  3467. }
  3468. } while (zone_type);
  3469. return nr_zones;
  3470. }
  3471. /*
  3472. * zonelist_order:
  3473. * 0 = automatic detection of better ordering.
  3474. * 1 = order by ([node] distance, -zonetype)
  3475. * 2 = order by (-zonetype, [node] distance)
  3476. *
  3477. * If not NUMA, ZONELIST_ORDER_ZONE and ZONELIST_ORDER_NODE will create
  3478. * the same zonelist. So only NUMA can configure this param.
  3479. */
  3480. #define ZONELIST_ORDER_DEFAULT 0
  3481. #define ZONELIST_ORDER_NODE 1
  3482. #define ZONELIST_ORDER_ZONE 2
  3483. /* zonelist order in the kernel.
  3484. * set_zonelist_order() will set this to NODE or ZONE.
  3485. */
  3486. static int current_zonelist_order = ZONELIST_ORDER_DEFAULT;
  3487. static char zonelist_order_name[3][8] = {"Default", "Node", "Zone"};
  3488. #ifdef CONFIG_NUMA
  3489. /* The value user specified ....changed by config */
  3490. static int user_zonelist_order = ZONELIST_ORDER_DEFAULT;
  3491. /* string for sysctl */
  3492. #define NUMA_ZONELIST_ORDER_LEN 16
  3493. char numa_zonelist_order[16] = "default";
  3494. /*
  3495. * interface for configure zonelist ordering.
  3496. * command line option "numa_zonelist_order"
  3497. * = "[dD]efault - default, automatic configuration.
  3498. * = "[nN]ode - order by node locality, then by zone within node
  3499. * = "[zZ]one - order by zone, then by locality within zone
  3500. */
  3501. static int __parse_numa_zonelist_order(char *s)
  3502. {
  3503. if (*s == 'd' || *s == 'D') {
  3504. user_zonelist_order = ZONELIST_ORDER_DEFAULT;
  3505. } else if (*s == 'n' || *s == 'N') {
  3506. user_zonelist_order = ZONELIST_ORDER_NODE;
  3507. } else if (*s == 'z' || *s == 'Z') {
  3508. user_zonelist_order = ZONELIST_ORDER_ZONE;
  3509. } else {
  3510. pr_warn("Ignoring invalid numa_zonelist_order value: %s\n", s);
  3511. return -EINVAL;
  3512. }
  3513. return 0;
  3514. }
  3515. static __init int setup_numa_zonelist_order(char *s)
  3516. {
  3517. int ret;
  3518. if (!s)
  3519. return 0;
  3520. ret = __parse_numa_zonelist_order(s);
  3521. if (ret == 0)
  3522. strlcpy(numa_zonelist_order, s, NUMA_ZONELIST_ORDER_LEN);
  3523. return ret;
  3524. }
  3525. early_param("numa_zonelist_order", setup_numa_zonelist_order);
  3526. /*
  3527. * sysctl handler for numa_zonelist_order
  3528. */
  3529. int numa_zonelist_order_handler(struct ctl_table *table, int write,
  3530. void __user *buffer, size_t *length,
  3531. loff_t *ppos)
  3532. {
  3533. char saved_string[NUMA_ZONELIST_ORDER_LEN];
  3534. int ret;
  3535. static DEFINE_MUTEX(zl_order_mutex);
  3536. mutex_lock(&zl_order_mutex);
  3537. if (write) {
  3538. if (strlen((char *)table->data) >= NUMA_ZONELIST_ORDER_LEN) {
  3539. ret = -EINVAL;
  3540. goto out;
  3541. }
  3542. strcpy(saved_string, (char *)table->data);
  3543. }
  3544. ret = proc_dostring(table, write, buffer, length, ppos);
  3545. if (ret)
  3546. goto out;
  3547. if (write) {
  3548. int oldval = user_zonelist_order;
  3549. ret = __parse_numa_zonelist_order((char *)table->data);
  3550. if (ret) {
  3551. /*
  3552. * bogus value. restore saved string
  3553. */
  3554. strncpy((char *)table->data, saved_string,
  3555. NUMA_ZONELIST_ORDER_LEN);
  3556. user_zonelist_order = oldval;
  3557. } else if (oldval != user_zonelist_order) {
  3558. mutex_lock(&zonelists_mutex);
  3559. build_all_zonelists(NULL, NULL);
  3560. mutex_unlock(&zonelists_mutex);
  3561. }
  3562. }
  3563. out:
  3564. mutex_unlock(&zl_order_mutex);
  3565. return ret;
  3566. }
  3567. #define MAX_NODE_LOAD (nr_online_nodes)
  3568. static int node_load[MAX_NUMNODES];
  3569. /**
  3570. * find_next_best_node - find the next node that should appear in a given node's fallback list
  3571. * @node: node whose fallback list we're appending
  3572. * @used_node_mask: nodemask_t of already used nodes
  3573. *
  3574. * We use a number of factors to determine which is the next node that should
  3575. * appear on a given node's fallback list. The node should not have appeared
  3576. * already in @node's fallback list, and it should be the next closest node
  3577. * according to the distance array (which contains arbitrary distance values
  3578. * from each node to each node in the system), and should also prefer nodes
  3579. * with no CPUs, since presumably they'll have very little allocation pressure
  3580. * on them otherwise.
  3581. * It returns -1 if no node is found.
  3582. */
  3583. static int find_next_best_node(int node, nodemask_t *used_node_mask)
  3584. {
  3585. int n, val;
  3586. int min_val = INT_MAX;
  3587. int best_node = NUMA_NO_NODE;
  3588. const struct cpumask *tmp = cpumask_of_node(0);
  3589. /* Use the local node if we haven't already */
  3590. if (!node_isset(node, *used_node_mask)) {
  3591. node_set(node, *used_node_mask);
  3592. return node;
  3593. }
  3594. for_each_node_state(n, N_MEMORY) {
  3595. /* Don't want a node to appear more than once */
  3596. if (node_isset(n, *used_node_mask))
  3597. continue;
  3598. /* Use the distance array to find the distance */
  3599. val = node_distance(node, n);
  3600. /* Penalize nodes under us ("prefer the next node") */
  3601. val += (n < node);
  3602. /* Give preference to headless and unused nodes */
  3603. tmp = cpumask_of_node(n);
  3604. if (!cpumask_empty(tmp))
  3605. val += PENALTY_FOR_NODE_WITH_CPUS;
  3606. /* Slight preference for less loaded node */
  3607. val *= (MAX_NODE_LOAD*MAX_NUMNODES);
  3608. val += node_load[n];
  3609. if (val < min_val) {
  3610. min_val = val;
  3611. best_node = n;
  3612. }
  3613. }
  3614. if (best_node >= 0)
  3615. node_set(best_node, *used_node_mask);
  3616. return best_node;
  3617. }
  3618. /*
  3619. * Build zonelists ordered by node and zones within node.
  3620. * This results in maximum locality--normal zone overflows into local
  3621. * DMA zone, if any--but risks exhausting DMA zone.
  3622. */
  3623. static void build_zonelists_in_node_order(pg_data_t *pgdat, int node)
  3624. {
  3625. int j;
  3626. struct zonelist *zonelist;
  3627. zonelist = &pgdat->node_zonelists[0];
  3628. for (j = 0; zonelist->_zonerefs[j].zone != NULL; j++)
  3629. ;
  3630. j = build_zonelists_node(NODE_DATA(node), zonelist, j);
  3631. zonelist->_zonerefs[j].zone = NULL;
  3632. zonelist->_zonerefs[j].zone_idx = 0;
  3633. }
  3634. /*
  3635. * Build gfp_thisnode zonelists
  3636. */
  3637. static void build_thisnode_zonelists(pg_data_t *pgdat)
  3638. {
  3639. int j;
  3640. struct zonelist *zonelist;
  3641. zonelist = &pgdat->node_zonelists[1];
  3642. j = build_zonelists_node(pgdat, zonelist, 0);
  3643. zonelist->_zonerefs[j].zone = NULL;
  3644. zonelist->_zonerefs[j].zone_idx = 0;
  3645. }
  3646. /*
  3647. * Build zonelists ordered by zone and nodes within zones.
  3648. * This results in conserving DMA zone[s] until all Normal memory is
  3649. * exhausted, but results in overflowing to remote node while memory
  3650. * may still exist in local DMA zone.
  3651. */
  3652. static int node_order[MAX_NUMNODES];
  3653. static void build_zonelists_in_zone_order(pg_data_t *pgdat, int nr_nodes)
  3654. {
  3655. int pos, j, node;
  3656. int zone_type; /* needs to be signed */
  3657. struct zone *z;
  3658. struct zonelist *zonelist;
  3659. zonelist = &pgdat->node_zonelists[0];
  3660. pos = 0;
  3661. for (zone_type = MAX_NR_ZONES - 1; zone_type >= 0; zone_type--) {
  3662. for (j = 0; j < nr_nodes; j++) {
  3663. node = node_order[j];
  3664. z = &NODE_DATA(node)->node_zones[zone_type];
  3665. if (populated_zone(z)) {
  3666. zoneref_set_zone(z,
  3667. &zonelist->_zonerefs[pos++]);
  3668. check_highest_zone(zone_type);
  3669. }
  3670. }
  3671. }
  3672. zonelist->_zonerefs[pos].zone = NULL;
  3673. zonelist->_zonerefs[pos].zone_idx = 0;
  3674. }
  3675. #if defined(CONFIG_64BIT)
  3676. /*
  3677. * Devices that require DMA32/DMA are relatively rare and do not justify a
  3678. * penalty to every machine in case the specialised case applies. Default
  3679. * to Node-ordering on 64-bit NUMA machines
  3680. */
  3681. static int default_zonelist_order(void)
  3682. {
  3683. return ZONELIST_ORDER_NODE;
  3684. }
  3685. #else
  3686. /*
  3687. * On 32-bit, the Normal zone needs to be preserved for allocations accessible
  3688. * by the kernel. If processes running on node 0 deplete the low memory zone
  3689. * then reclaim will occur more frequency increasing stalls and potentially
  3690. * be easier to OOM if a large percentage of the zone is under writeback or
  3691. * dirty. The problem is significantly worse if CONFIG_HIGHPTE is not set.
  3692. * Hence, default to zone ordering on 32-bit.
  3693. */
  3694. static int default_zonelist_order(void)
  3695. {
  3696. return ZONELIST_ORDER_ZONE;
  3697. }
  3698. #endif /* CONFIG_64BIT */
  3699. static void set_zonelist_order(void)
  3700. {
  3701. if (user_zonelist_order == ZONELIST_ORDER_DEFAULT)
  3702. current_zonelist_order = default_zonelist_order();
  3703. else
  3704. current_zonelist_order = user_zonelist_order;
  3705. }
  3706. static void build_zonelists(pg_data_t *pgdat)
  3707. {
  3708. int i, node, load;
  3709. nodemask_t used_mask;
  3710. int local_node, prev_node;
  3711. struct zonelist *zonelist;
  3712. unsigned int order = current_zonelist_order;
  3713. /* initialize zonelists */
  3714. for (i = 0; i < MAX_ZONELISTS; i++) {
  3715. zonelist = pgdat->node_zonelists + i;
  3716. zonelist->_zonerefs[0].zone = NULL;
  3717. zonelist->_zonerefs[0].zone_idx = 0;
  3718. }
  3719. /* NUMA-aware ordering of nodes */
  3720. local_node = pgdat->node_id;
  3721. load = nr_online_nodes;
  3722. prev_node = local_node;
  3723. nodes_clear(used_mask);
  3724. memset(node_order, 0, sizeof(node_order));
  3725. i = 0;
  3726. while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
  3727. /*
  3728. * We don't want to pressure a particular node.
  3729. * So adding penalty to the first node in same
  3730. * distance group to make it round-robin.
  3731. */
  3732. if (node_distance(local_node, node) !=
  3733. node_distance(local_node, prev_node))
  3734. node_load[node] = load;
  3735. prev_node = node;
  3736. load--;
  3737. if (order == ZONELIST_ORDER_NODE)
  3738. build_zonelists_in_node_order(pgdat, node);
  3739. else
  3740. node_order[i++] = node; /* remember order */
  3741. }
  3742. if (order == ZONELIST_ORDER_ZONE) {
  3743. /* calculate node order -- i.e., DMA last! */
  3744. build_zonelists_in_zone_order(pgdat, i);
  3745. }
  3746. build_thisnode_zonelists(pgdat);
  3747. }
  3748. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  3749. /*
  3750. * Return node id of node used for "local" allocations.
  3751. * I.e., first node id of first zone in arg node's generic zonelist.
  3752. * Used for initializing percpu 'numa_mem', which is used primarily
  3753. * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
  3754. */
  3755. int local_memory_node(int node)
  3756. {
  3757. struct zone *zone;
  3758. (void)first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
  3759. gfp_zone(GFP_KERNEL),
  3760. NULL,
  3761. &zone);
  3762. return zone->node;
  3763. }
  3764. #endif
  3765. #else /* CONFIG_NUMA */
  3766. static void set_zonelist_order(void)
  3767. {
  3768. current_zonelist_order = ZONELIST_ORDER_ZONE;
  3769. }
  3770. static void build_zonelists(pg_data_t *pgdat)
  3771. {
  3772. int node, local_node;
  3773. enum zone_type j;
  3774. struct zonelist *zonelist;
  3775. local_node = pgdat->node_id;
  3776. zonelist = &pgdat->node_zonelists[0];
  3777. j = build_zonelists_node(pgdat, zonelist, 0);
  3778. /*
  3779. * Now we build the zonelist so that it contains the zones
  3780. * of all the other nodes.
  3781. * We don't want to pressure a particular node, so when
  3782. * building the zones for node N, we make sure that the
  3783. * zones coming right after the local ones are those from
  3784. * node N+1 (modulo N)
  3785. */
  3786. for (node = local_node + 1; node < MAX_NUMNODES; node++) {
  3787. if (!node_online(node))
  3788. continue;
  3789. j = build_zonelists_node(NODE_DATA(node), zonelist, j);
  3790. }
  3791. for (node = 0; node < local_node; node++) {
  3792. if (!node_online(node))
  3793. continue;
  3794. j = build_zonelists_node(NODE_DATA(node), zonelist, j);
  3795. }
  3796. zonelist->_zonerefs[j].zone = NULL;
  3797. zonelist->_zonerefs[j].zone_idx = 0;
  3798. }
  3799. #endif /* CONFIG_NUMA */
  3800. /*
  3801. * Boot pageset table. One per cpu which is going to be used for all
  3802. * zones and all nodes. The parameters will be set in such a way
  3803. * that an item put on a list will immediately be handed over to
  3804. * the buddy list. This is safe since pageset manipulation is done
  3805. * with interrupts disabled.
  3806. *
  3807. * The boot_pagesets must be kept even after bootup is complete for
  3808. * unused processors and/or zones. They do play a role for bootstrapping
  3809. * hotplugged processors.
  3810. *
  3811. * zoneinfo_show() and maybe other functions do
  3812. * not check if the processor is online before following the pageset pointer.
  3813. * Other parts of the kernel may not check if the zone is available.
  3814. */
  3815. static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
  3816. static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
  3817. static void setup_zone_pageset(struct zone *zone);
  3818. /*
  3819. * Global mutex to protect against size modification of zonelists
  3820. * as well as to serialize pageset setup for the new populated zone.
  3821. */
  3822. DEFINE_MUTEX(zonelists_mutex);
  3823. /* return values int ....just for stop_machine() */
  3824. static int __build_all_zonelists(void *data)
  3825. {
  3826. int nid;
  3827. int cpu;
  3828. pg_data_t *self = data;
  3829. #ifdef CONFIG_NUMA
  3830. memset(node_load, 0, sizeof(node_load));
  3831. #endif
  3832. if (self && !node_online(self->node_id)) {
  3833. build_zonelists(self);
  3834. }
  3835. for_each_online_node(nid) {
  3836. pg_data_t *pgdat = NODE_DATA(nid);
  3837. build_zonelists(pgdat);
  3838. }
  3839. /*
  3840. * Initialize the boot_pagesets that are going to be used
  3841. * for bootstrapping processors. The real pagesets for
  3842. * each zone will be allocated later when the per cpu
  3843. * allocator is available.
  3844. *
  3845. * boot_pagesets are used also for bootstrapping offline
  3846. * cpus if the system is already booted because the pagesets
  3847. * are needed to initialize allocators on a specific cpu too.
  3848. * F.e. the percpu allocator needs the page allocator which
  3849. * needs the percpu allocator in order to allocate its pagesets
  3850. * (a chicken-egg dilemma).
  3851. */
  3852. for_each_possible_cpu(cpu) {
  3853. setup_pageset(&per_cpu(boot_pageset, cpu), 0);
  3854. #ifdef CONFIG_HAVE_MEMORYLESS_NODES
  3855. /*
  3856. * We now know the "local memory node" for each node--
  3857. * i.e., the node of the first zone in the generic zonelist.
  3858. * Set up numa_mem percpu variable for on-line cpus. During
  3859. * boot, only the boot cpu should be on-line; we'll init the
  3860. * secondary cpus' numa_mem as they come on-line. During
  3861. * node/memory hotplug, we'll fixup all on-line cpus.
  3862. */
  3863. if (cpu_online(cpu))
  3864. set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
  3865. #endif
  3866. }
  3867. return 0;
  3868. }
  3869. static noinline void __init
  3870. build_all_zonelists_init(void)
  3871. {
  3872. __build_all_zonelists(NULL);
  3873. mminit_verify_zonelist();
  3874. cpuset_init_current_mems_allowed();
  3875. }
  3876. /*
  3877. * Called with zonelists_mutex held always
  3878. * unless system_state == SYSTEM_BOOTING.
  3879. *
  3880. * __ref due to (1) call of __meminit annotated setup_zone_pageset
  3881. * [we're only called with non-NULL zone through __meminit paths] and
  3882. * (2) call of __init annotated helper build_all_zonelists_init
  3883. * [protected by SYSTEM_BOOTING].
  3884. */
  3885. void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone)
  3886. {
  3887. set_zonelist_order();
  3888. if (system_state == SYSTEM_BOOTING) {
  3889. build_all_zonelists_init();
  3890. } else {
  3891. #ifdef CONFIG_MEMORY_HOTPLUG
  3892. if (zone)
  3893. setup_zone_pageset(zone);
  3894. #endif
  3895. /* we have to stop all cpus to guarantee there is no user
  3896. of zonelist */
  3897. stop_machine(__build_all_zonelists, pgdat, NULL);
  3898. /* cpuset refresh routine should be here */
  3899. }
  3900. vm_total_pages = nr_free_pagecache_pages();
  3901. /*
  3902. * Disable grouping by mobility if the number of pages in the
  3903. * system is too low to allow the mechanism to work. It would be
  3904. * more accurate, but expensive to check per-zone. This check is
  3905. * made on memory-hotadd so a system can start with mobility
  3906. * disabled and enable it later
  3907. */
  3908. if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
  3909. page_group_by_mobility_disabled = 1;
  3910. else
  3911. page_group_by_mobility_disabled = 0;
  3912. pr_info("Built %i zonelists in %s order, mobility grouping %s. Total pages: %ld\n",
  3913. nr_online_nodes,
  3914. zonelist_order_name[current_zonelist_order],
  3915. page_group_by_mobility_disabled ? "off" : "on",
  3916. vm_total_pages);
  3917. #ifdef CONFIG_NUMA
  3918. pr_info("Policy zone: %s\n", zone_names[policy_zone]);
  3919. #endif
  3920. }
  3921. /*
  3922. * Helper functions to size the waitqueue hash table.
  3923. * Essentially these want to choose hash table sizes sufficiently
  3924. * large so that collisions trying to wait on pages are rare.
  3925. * But in fact, the number of active page waitqueues on typical
  3926. * systems is ridiculously low, less than 200. So this is even
  3927. * conservative, even though it seems large.
  3928. *
  3929. * The constant PAGES_PER_WAITQUEUE specifies the ratio of pages to
  3930. * waitqueues, i.e. the size of the waitq table given the number of pages.
  3931. */
  3932. #define PAGES_PER_WAITQUEUE 256
  3933. #ifndef CONFIG_MEMORY_HOTPLUG
  3934. static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
  3935. {
  3936. unsigned long size = 1;
  3937. pages /= PAGES_PER_WAITQUEUE;
  3938. while (size < pages)
  3939. size <<= 1;
  3940. /*
  3941. * Once we have dozens or even hundreds of threads sleeping
  3942. * on IO we've got bigger problems than wait queue collision.
  3943. * Limit the size of the wait table to a reasonable size.
  3944. */
  3945. size = min(size, 4096UL);
  3946. return max(size, 4UL);
  3947. }
  3948. #else
  3949. /*
  3950. * A zone's size might be changed by hot-add, so it is not possible to determine
  3951. * a suitable size for its wait_table. So we use the maximum size now.
  3952. *
  3953. * The max wait table size = 4096 x sizeof(wait_queue_head_t). ie:
  3954. *
  3955. * i386 (preemption config) : 4096 x 16 = 64Kbyte.
  3956. * ia64, x86-64 (no preemption): 4096 x 20 = 80Kbyte.
  3957. * ia64, x86-64 (preemption) : 4096 x 24 = 96Kbyte.
  3958. *
  3959. * The maximum entries are prepared when a zone's memory is (512K + 256) pages
  3960. * or more by the traditional way. (See above). It equals:
  3961. *
  3962. * i386, x86-64, powerpc(4K page size) : = ( 2G + 1M)byte.
  3963. * ia64(16K page size) : = ( 8G + 4M)byte.
  3964. * powerpc (64K page size) : = (32G +16M)byte.
  3965. */
  3966. static inline unsigned long wait_table_hash_nr_entries(unsigned long pages)
  3967. {
  3968. return 4096UL;
  3969. }
  3970. #endif
  3971. /*
  3972. * This is an integer logarithm so that shifts can be used later
  3973. * to extract the more random high bits from the multiplicative
  3974. * hash function before the remainder is taken.
  3975. */
  3976. static inline unsigned long wait_table_bits(unsigned long size)
  3977. {
  3978. return ffz(~size);
  3979. }
  3980. /*
  3981. * Initially all pages are reserved - free ones are freed
  3982. * up by free_all_bootmem() once the early boot process is
  3983. * done. Non-atomic initialization, single-pass.
  3984. */
  3985. void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
  3986. unsigned long start_pfn, enum memmap_context context)
  3987. {
  3988. struct vmem_altmap *altmap = to_vmem_altmap(__pfn_to_phys(start_pfn));
  3989. unsigned long end_pfn = start_pfn + size;
  3990. pg_data_t *pgdat = NODE_DATA(nid);
  3991. unsigned long pfn;
  3992. unsigned long nr_initialised = 0;
  3993. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  3994. struct memblock_region *r = NULL, *tmp;
  3995. #endif
  3996. if (highest_memmap_pfn < end_pfn - 1)
  3997. highest_memmap_pfn = end_pfn - 1;
  3998. /*
  3999. * Honor reservation requested by the driver for this ZONE_DEVICE
  4000. * memory
  4001. */
  4002. if (altmap && start_pfn == altmap->base_pfn)
  4003. start_pfn += altmap->reserve;
  4004. for (pfn = start_pfn; pfn < end_pfn; pfn++) {
  4005. /*
  4006. * There can be holes in boot-time mem_map[]s handed to this
  4007. * function. They do not exist on hotplugged memory.
  4008. */
  4009. if (context != MEMMAP_EARLY)
  4010. goto not_early;
  4011. if (!early_pfn_valid(pfn))
  4012. continue;
  4013. if (!early_pfn_in_nid(pfn, nid))
  4014. continue;
  4015. if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
  4016. break;
  4017. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4018. /*
  4019. * If not mirrored_kernelcore and ZONE_MOVABLE exists, range
  4020. * from zone_movable_pfn[nid] to end of each node should be
  4021. * ZONE_MOVABLE not ZONE_NORMAL. skip it.
  4022. */
  4023. if (!mirrored_kernelcore && zone_movable_pfn[nid])
  4024. if (zone == ZONE_NORMAL && pfn >= zone_movable_pfn[nid])
  4025. continue;
  4026. /*
  4027. * Check given memblock attribute by firmware which can affect
  4028. * kernel memory layout. If zone==ZONE_MOVABLE but memory is
  4029. * mirrored, it's an overlapped memmap init. skip it.
  4030. */
  4031. if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
  4032. if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
  4033. for_each_memblock(memory, tmp)
  4034. if (pfn < memblock_region_memory_end_pfn(tmp))
  4035. break;
  4036. r = tmp;
  4037. }
  4038. if (pfn >= memblock_region_memory_base_pfn(r) &&
  4039. memblock_is_mirror(r)) {
  4040. /* already initialized as NORMAL */
  4041. pfn = memblock_region_memory_end_pfn(r);
  4042. continue;
  4043. }
  4044. }
  4045. #endif
  4046. not_early:
  4047. /*
  4048. * Mark the block movable so that blocks are reserved for
  4049. * movable at startup. This will force kernel allocations
  4050. * to reserve their blocks rather than leaking throughout
  4051. * the address space during boot when many long-lived
  4052. * kernel allocations are made.
  4053. *
  4054. * bitmap is created for zone's valid pfn range. but memmap
  4055. * can be created for invalid pages (for alignment)
  4056. * check here not to call set_pageblock_migratetype() against
  4057. * pfn out of zone.
  4058. */
  4059. if (!(pfn & (pageblock_nr_pages - 1))) {
  4060. struct page *page = pfn_to_page(pfn);
  4061. __init_single_page(page, pfn, zone, nid);
  4062. set_pageblock_migratetype(page, MIGRATE_MOVABLE);
  4063. } else {
  4064. __init_single_pfn(pfn, zone, nid);
  4065. }
  4066. }
  4067. }
  4068. static void __meminit zone_init_free_lists(struct zone *zone)
  4069. {
  4070. unsigned int order, t;
  4071. for_each_migratetype_order(order, t) {
  4072. INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
  4073. zone->free_area[order].nr_free = 0;
  4074. }
  4075. }
  4076. #ifndef __HAVE_ARCH_MEMMAP_INIT
  4077. #define memmap_init(size, nid, zone, start_pfn) \
  4078. memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
  4079. #endif
  4080. static int zone_batchsize(struct zone *zone)
  4081. {
  4082. #ifdef CONFIG_MMU
  4083. int batch;
  4084. /*
  4085. * The per-cpu-pages pools are set to around 1000th of the
  4086. * size of the zone. But no more than 1/2 of a meg.
  4087. *
  4088. * OK, so we don't know how big the cache is. So guess.
  4089. */
  4090. batch = zone->managed_pages / 1024;
  4091. if (batch * PAGE_SIZE > 512 * 1024)
  4092. batch = (512 * 1024) / PAGE_SIZE;
  4093. batch /= 4; /* We effectively *= 4 below */
  4094. if (batch < 1)
  4095. batch = 1;
  4096. /*
  4097. * Clamp the batch to a 2^n - 1 value. Having a power
  4098. * of 2 value was found to be more likely to have
  4099. * suboptimal cache aliasing properties in some cases.
  4100. *
  4101. * For example if 2 tasks are alternately allocating
  4102. * batches of pages, one task can end up with a lot
  4103. * of pages of one half of the possible page colors
  4104. * and the other with pages of the other colors.
  4105. */
  4106. batch = rounddown_pow_of_two(batch + batch/2) - 1;
  4107. return batch;
  4108. #else
  4109. /* The deferral and batching of frees should be suppressed under NOMMU
  4110. * conditions.
  4111. *
  4112. * The problem is that NOMMU needs to be able to allocate large chunks
  4113. * of contiguous memory as there's no hardware page translation to
  4114. * assemble apparent contiguous memory from discontiguous pages.
  4115. *
  4116. * Queueing large contiguous runs of pages for batching, however,
  4117. * causes the pages to actually be freed in smaller chunks. As there
  4118. * can be a significant delay between the individual batches being
  4119. * recycled, this leads to the once large chunks of space being
  4120. * fragmented and becoming unavailable for high-order allocations.
  4121. */
  4122. return 0;
  4123. #endif
  4124. }
  4125. /*
  4126. * pcp->high and pcp->batch values are related and dependent on one another:
  4127. * ->batch must never be higher then ->high.
  4128. * The following function updates them in a safe manner without read side
  4129. * locking.
  4130. *
  4131. * Any new users of pcp->batch and pcp->high should ensure they can cope with
  4132. * those fields changing asynchronously (acording the the above rule).
  4133. *
  4134. * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
  4135. * outside of boot time (or some other assurance that no concurrent updaters
  4136. * exist).
  4137. */
  4138. static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
  4139. unsigned long batch)
  4140. {
  4141. /* start with a fail safe value for batch */
  4142. pcp->batch = 1;
  4143. smp_wmb();
  4144. /* Update high, then batch, in order */
  4145. pcp->high = high;
  4146. smp_wmb();
  4147. pcp->batch = batch;
  4148. }
  4149. /* a companion to pageset_set_high() */
  4150. static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
  4151. {
  4152. pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
  4153. }
  4154. static void pageset_init(struct per_cpu_pageset *p)
  4155. {
  4156. struct per_cpu_pages *pcp;
  4157. int migratetype;
  4158. memset(p, 0, sizeof(*p));
  4159. pcp = &p->pcp;
  4160. pcp->count = 0;
  4161. for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
  4162. INIT_LIST_HEAD(&pcp->lists[migratetype]);
  4163. }
  4164. static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
  4165. {
  4166. pageset_init(p);
  4167. pageset_set_batch(p, batch);
  4168. }
  4169. /*
  4170. * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
  4171. * to the value high for the pageset p.
  4172. */
  4173. static void pageset_set_high(struct per_cpu_pageset *p,
  4174. unsigned long high)
  4175. {
  4176. unsigned long batch = max(1UL, high / 4);
  4177. if ((high / 4) > (PAGE_SHIFT * 8))
  4178. batch = PAGE_SHIFT * 8;
  4179. pageset_update(&p->pcp, high, batch);
  4180. }
  4181. static void pageset_set_high_and_batch(struct zone *zone,
  4182. struct per_cpu_pageset *pcp)
  4183. {
  4184. if (percpu_pagelist_fraction)
  4185. pageset_set_high(pcp,
  4186. (zone->managed_pages /
  4187. percpu_pagelist_fraction));
  4188. else
  4189. pageset_set_batch(pcp, zone_batchsize(zone));
  4190. }
  4191. static void __meminit zone_pageset_init(struct zone *zone, int cpu)
  4192. {
  4193. struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);
  4194. pageset_init(pcp);
  4195. pageset_set_high_and_batch(zone, pcp);
  4196. }
  4197. static void __meminit setup_zone_pageset(struct zone *zone)
  4198. {
  4199. int cpu;
  4200. zone->pageset = alloc_percpu(struct per_cpu_pageset);
  4201. for_each_possible_cpu(cpu)
  4202. zone_pageset_init(zone, cpu);
  4203. }
  4204. /*
  4205. * Allocate per cpu pagesets and initialize them.
  4206. * Before this call only boot pagesets were available.
  4207. */
  4208. void __init setup_per_cpu_pageset(void)
  4209. {
  4210. struct zone *zone;
  4211. for_each_populated_zone(zone)
  4212. setup_zone_pageset(zone);
  4213. }
  4214. static noinline __init_refok
  4215. int zone_wait_table_init(struct zone *zone, unsigned long zone_size_pages)
  4216. {
  4217. int i;
  4218. size_t alloc_size;
  4219. /*
  4220. * The per-page waitqueue mechanism uses hashed waitqueues
  4221. * per zone.
  4222. */
  4223. zone->wait_table_hash_nr_entries =
  4224. wait_table_hash_nr_entries(zone_size_pages);
  4225. zone->wait_table_bits =
  4226. wait_table_bits(zone->wait_table_hash_nr_entries);
  4227. alloc_size = zone->wait_table_hash_nr_entries
  4228. * sizeof(wait_queue_head_t);
  4229. if (!slab_is_available()) {
  4230. zone->wait_table = (wait_queue_head_t *)
  4231. memblock_virt_alloc_node_nopanic(
  4232. alloc_size, zone->zone_pgdat->node_id);
  4233. } else {
  4234. /*
  4235. * This case means that a zone whose size was 0 gets new memory
  4236. * via memory hot-add.
  4237. * But it may be the case that a new node was hot-added. In
  4238. * this case vmalloc() will not be able to use this new node's
  4239. * memory - this wait_table must be initialized to use this new
  4240. * node itself as well.
  4241. * To use this new node's memory, further consideration will be
  4242. * necessary.
  4243. */
  4244. zone->wait_table = vmalloc(alloc_size);
  4245. }
  4246. if (!zone->wait_table)
  4247. return -ENOMEM;
  4248. for (i = 0; i < zone->wait_table_hash_nr_entries; ++i)
  4249. init_waitqueue_head(zone->wait_table + i);
  4250. return 0;
  4251. }
  4252. static __meminit void zone_pcp_init(struct zone *zone)
  4253. {
  4254. /*
  4255. * per cpu subsystem is not up at this point. The following code
  4256. * relies on the ability of the linker to provide the
  4257. * offset of a (static) per cpu variable into the per cpu area.
  4258. */
  4259. zone->pageset = &boot_pageset;
  4260. if (populated_zone(zone))
  4261. printk(KERN_DEBUG " %s zone: %lu pages, LIFO batch:%u\n",
  4262. zone->name, zone->present_pages,
  4263. zone_batchsize(zone));
  4264. }
  4265. int __meminit init_currently_empty_zone(struct zone *zone,
  4266. unsigned long zone_start_pfn,
  4267. unsigned long size)
  4268. {
  4269. struct pglist_data *pgdat = zone->zone_pgdat;
  4270. int ret;
  4271. ret = zone_wait_table_init(zone, size);
  4272. if (ret)
  4273. return ret;
  4274. pgdat->nr_zones = zone_idx(zone) + 1;
  4275. zone->zone_start_pfn = zone_start_pfn;
  4276. mminit_dprintk(MMINIT_TRACE, "memmap_init",
  4277. "Initialising map node %d zone %lu pfns %lu -> %lu\n",
  4278. pgdat->node_id,
  4279. (unsigned long)zone_idx(zone),
  4280. zone_start_pfn, (zone_start_pfn + size));
  4281. zone_init_free_lists(zone);
  4282. return 0;
  4283. }
  4284. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4285. #ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
  4286. /*
  4287. * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
  4288. */
  4289. int __meminit __early_pfn_to_nid(unsigned long pfn,
  4290. struct mminit_pfnnid_cache *state)
  4291. {
  4292. unsigned long start_pfn, end_pfn;
  4293. int nid;
  4294. if (state->last_start <= pfn && pfn < state->last_end)
  4295. return state->last_nid;
  4296. nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
  4297. if (nid != -1) {
  4298. state->last_start = start_pfn;
  4299. state->last_end = end_pfn;
  4300. state->last_nid = nid;
  4301. }
  4302. return nid;
  4303. }
  4304. #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
  4305. /**
  4306. * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
  4307. * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
  4308. * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
  4309. *
  4310. * If an architecture guarantees that all ranges registered contain no holes
  4311. * and may be freed, this this function may be used instead of calling
  4312. * memblock_free_early_nid() manually.
  4313. */
  4314. void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
  4315. {
  4316. unsigned long start_pfn, end_pfn;
  4317. int i, this_nid;
  4318. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
  4319. start_pfn = min(start_pfn, max_low_pfn);
  4320. end_pfn = min(end_pfn, max_low_pfn);
  4321. if (start_pfn < end_pfn)
  4322. memblock_free_early_nid(PFN_PHYS(start_pfn),
  4323. (end_pfn - start_pfn) << PAGE_SHIFT,
  4324. this_nid);
  4325. }
  4326. }
  4327. /**
  4328. * sparse_memory_present_with_active_regions - Call memory_present for each active range
  4329. * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
  4330. *
  4331. * If an architecture guarantees that all ranges registered contain no holes and may
  4332. * be freed, this function may be used instead of calling memory_present() manually.
  4333. */
  4334. void __init sparse_memory_present_with_active_regions(int nid)
  4335. {
  4336. unsigned long start_pfn, end_pfn;
  4337. int i, this_nid;
  4338. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
  4339. memory_present(this_nid, start_pfn, end_pfn);
  4340. }
  4341. /**
  4342. * get_pfn_range_for_nid - Return the start and end page frames for a node
  4343. * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
  4344. * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
  4345. * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
  4346. *
  4347. * It returns the start and end page frame of a node based on information
  4348. * provided by memblock_set_node(). If called for a node
  4349. * with no available memory, a warning is printed and the start and end
  4350. * PFNs will be 0.
  4351. */
  4352. void __meminit get_pfn_range_for_nid(unsigned int nid,
  4353. unsigned long *start_pfn, unsigned long *end_pfn)
  4354. {
  4355. unsigned long this_start_pfn, this_end_pfn;
  4356. int i;
  4357. *start_pfn = -1UL;
  4358. *end_pfn = 0;
  4359. for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
  4360. *start_pfn = min(*start_pfn, this_start_pfn);
  4361. *end_pfn = max(*end_pfn, this_end_pfn);
  4362. }
  4363. if (*start_pfn == -1UL)
  4364. *start_pfn = 0;
  4365. }
  4366. /*
  4367. * This finds a zone that can be used for ZONE_MOVABLE pages. The
  4368. * assumption is made that zones within a node are ordered in monotonic
  4369. * increasing memory addresses so that the "highest" populated zone is used
  4370. */
  4371. static void __init find_usable_zone_for_movable(void)
  4372. {
  4373. int zone_index;
  4374. for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
  4375. if (zone_index == ZONE_MOVABLE)
  4376. continue;
  4377. if (arch_zone_highest_possible_pfn[zone_index] >
  4378. arch_zone_lowest_possible_pfn[zone_index])
  4379. break;
  4380. }
  4381. VM_BUG_ON(zone_index == -1);
  4382. movable_zone = zone_index;
  4383. }
  4384. /*
  4385. * The zone ranges provided by the architecture do not include ZONE_MOVABLE
  4386. * because it is sized independent of architecture. Unlike the other zones,
  4387. * the starting point for ZONE_MOVABLE is not fixed. It may be different
  4388. * in each node depending on the size of each node and how evenly kernelcore
  4389. * is distributed. This helper function adjusts the zone ranges
  4390. * provided by the architecture for a given node by using the end of the
  4391. * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
  4392. * zones within a node are in order of monotonic increases memory addresses
  4393. */
  4394. static void __meminit adjust_zone_range_for_zone_movable(int nid,
  4395. unsigned long zone_type,
  4396. unsigned long node_start_pfn,
  4397. unsigned long node_end_pfn,
  4398. unsigned long *zone_start_pfn,
  4399. unsigned long *zone_end_pfn)
  4400. {
  4401. /* Only adjust if ZONE_MOVABLE is on this node */
  4402. if (zone_movable_pfn[nid]) {
  4403. /* Size ZONE_MOVABLE */
  4404. if (zone_type == ZONE_MOVABLE) {
  4405. *zone_start_pfn = zone_movable_pfn[nid];
  4406. *zone_end_pfn = min(node_end_pfn,
  4407. arch_zone_highest_possible_pfn[movable_zone]);
  4408. /* Check if this whole range is within ZONE_MOVABLE */
  4409. } else if (*zone_start_pfn >= zone_movable_pfn[nid])
  4410. *zone_start_pfn = *zone_end_pfn;
  4411. }
  4412. }
  4413. /*
  4414. * Return the number of pages a zone spans in a node, including holes
  4415. * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  4416. */
  4417. static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  4418. unsigned long zone_type,
  4419. unsigned long node_start_pfn,
  4420. unsigned long node_end_pfn,
  4421. unsigned long *zone_start_pfn,
  4422. unsigned long *zone_end_pfn,
  4423. unsigned long *ignored)
  4424. {
  4425. /* When hotadd a new node from cpu_up(), the node should be empty */
  4426. if (!node_start_pfn && !node_end_pfn)
  4427. return 0;
  4428. /* Get the start and end of the zone */
  4429. *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
  4430. *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
  4431. adjust_zone_range_for_zone_movable(nid, zone_type,
  4432. node_start_pfn, node_end_pfn,
  4433. zone_start_pfn, zone_end_pfn);
  4434. /* Check that this node has pages within the zone's required range */
  4435. if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
  4436. return 0;
  4437. /* Move the zone boundaries inside the node if necessary */
  4438. *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
  4439. *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
  4440. /* Return the spanned pages */
  4441. return *zone_end_pfn - *zone_start_pfn;
  4442. }
  4443. /*
  4444. * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  4445. * then all holes in the requested range will be accounted for.
  4446. */
  4447. unsigned long __meminit __absent_pages_in_range(int nid,
  4448. unsigned long range_start_pfn,
  4449. unsigned long range_end_pfn)
  4450. {
  4451. unsigned long nr_absent = range_end_pfn - range_start_pfn;
  4452. unsigned long start_pfn, end_pfn;
  4453. int i;
  4454. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
  4455. start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
  4456. end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
  4457. nr_absent -= end_pfn - start_pfn;
  4458. }
  4459. return nr_absent;
  4460. }
  4461. /**
  4462. * absent_pages_in_range - Return number of page frames in holes within a range
  4463. * @start_pfn: The start PFN to start searching for holes
  4464. * @end_pfn: The end PFN to stop searching for holes
  4465. *
  4466. * It returns the number of pages frames in memory holes within a range.
  4467. */
  4468. unsigned long __init absent_pages_in_range(unsigned long start_pfn,
  4469. unsigned long end_pfn)
  4470. {
  4471. return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
  4472. }
  4473. /* Return the number of page frames in holes in a zone on a node */
  4474. static unsigned long __meminit zone_absent_pages_in_node(int nid,
  4475. unsigned long zone_type,
  4476. unsigned long node_start_pfn,
  4477. unsigned long node_end_pfn,
  4478. unsigned long *ignored)
  4479. {
  4480. unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
  4481. unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
  4482. unsigned long zone_start_pfn, zone_end_pfn;
  4483. unsigned long nr_absent;
  4484. /* When hotadd a new node from cpu_up(), the node should be empty */
  4485. if (!node_start_pfn && !node_end_pfn)
  4486. return 0;
  4487. zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
  4488. zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
  4489. adjust_zone_range_for_zone_movable(nid, zone_type,
  4490. node_start_pfn, node_end_pfn,
  4491. &zone_start_pfn, &zone_end_pfn);
  4492. nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
  4493. /*
  4494. * ZONE_MOVABLE handling.
  4495. * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
  4496. * and vice versa.
  4497. */
  4498. if (zone_movable_pfn[nid]) {
  4499. if (mirrored_kernelcore) {
  4500. unsigned long start_pfn, end_pfn;
  4501. struct memblock_region *r;
  4502. for_each_memblock(memory, r) {
  4503. start_pfn = clamp(memblock_region_memory_base_pfn(r),
  4504. zone_start_pfn, zone_end_pfn);
  4505. end_pfn = clamp(memblock_region_memory_end_pfn(r),
  4506. zone_start_pfn, zone_end_pfn);
  4507. if (zone_type == ZONE_MOVABLE &&
  4508. memblock_is_mirror(r))
  4509. nr_absent += end_pfn - start_pfn;
  4510. if (zone_type == ZONE_NORMAL &&
  4511. !memblock_is_mirror(r))
  4512. nr_absent += end_pfn - start_pfn;
  4513. }
  4514. } else {
  4515. if (zone_type == ZONE_NORMAL)
  4516. nr_absent += node_end_pfn - zone_movable_pfn[nid];
  4517. }
  4518. }
  4519. return nr_absent;
  4520. }
  4521. #else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  4522. static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
  4523. unsigned long zone_type,
  4524. unsigned long node_start_pfn,
  4525. unsigned long node_end_pfn,
  4526. unsigned long *zone_start_pfn,
  4527. unsigned long *zone_end_pfn,
  4528. unsigned long *zones_size)
  4529. {
  4530. unsigned int zone;
  4531. *zone_start_pfn = node_start_pfn;
  4532. for (zone = 0; zone < zone_type; zone++)
  4533. *zone_start_pfn += zones_size[zone];
  4534. *zone_end_pfn = *zone_start_pfn + zones_size[zone_type];
  4535. return zones_size[zone_type];
  4536. }
  4537. static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
  4538. unsigned long zone_type,
  4539. unsigned long node_start_pfn,
  4540. unsigned long node_end_pfn,
  4541. unsigned long *zholes_size)
  4542. {
  4543. if (!zholes_size)
  4544. return 0;
  4545. return zholes_size[zone_type];
  4546. }
  4547. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  4548. static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
  4549. unsigned long node_start_pfn,
  4550. unsigned long node_end_pfn,
  4551. unsigned long *zones_size,
  4552. unsigned long *zholes_size)
  4553. {
  4554. unsigned long realtotalpages = 0, totalpages = 0;
  4555. enum zone_type i;
  4556. for (i = 0; i < MAX_NR_ZONES; i++) {
  4557. struct zone *zone = pgdat->node_zones + i;
  4558. unsigned long zone_start_pfn, zone_end_pfn;
  4559. unsigned long size, real_size;
  4560. size = zone_spanned_pages_in_node(pgdat->node_id, i,
  4561. node_start_pfn,
  4562. node_end_pfn,
  4563. &zone_start_pfn,
  4564. &zone_end_pfn,
  4565. zones_size);
  4566. real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
  4567. node_start_pfn, node_end_pfn,
  4568. zholes_size);
  4569. if (size)
  4570. zone->zone_start_pfn = zone_start_pfn;
  4571. else
  4572. zone->zone_start_pfn = 0;
  4573. zone->spanned_pages = size;
  4574. zone->present_pages = real_size;
  4575. totalpages += size;
  4576. realtotalpages += real_size;
  4577. }
  4578. pgdat->node_spanned_pages = totalpages;
  4579. pgdat->node_present_pages = realtotalpages;
  4580. printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
  4581. realtotalpages);
  4582. }
  4583. #ifndef CONFIG_SPARSEMEM
  4584. /*
  4585. * Calculate the size of the zone->blockflags rounded to an unsigned long
  4586. * Start by making sure zonesize is a multiple of pageblock_order by rounding
  4587. * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
  4588. * round what is now in bits to nearest long in bits, then return it in
  4589. * bytes.
  4590. */
  4591. static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
  4592. {
  4593. unsigned long usemapsize;
  4594. zonesize += zone_start_pfn & (pageblock_nr_pages-1);
  4595. usemapsize = roundup(zonesize, pageblock_nr_pages);
  4596. usemapsize = usemapsize >> pageblock_order;
  4597. usemapsize *= NR_PAGEBLOCK_BITS;
  4598. usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));
  4599. return usemapsize / 8;
  4600. }
  4601. static void __init setup_usemap(struct pglist_data *pgdat,
  4602. struct zone *zone,
  4603. unsigned long zone_start_pfn,
  4604. unsigned long zonesize)
  4605. {
  4606. unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
  4607. zone->pageblock_flags = NULL;
  4608. if (usemapsize)
  4609. zone->pageblock_flags =
  4610. memblock_virt_alloc_node_nopanic(usemapsize,
  4611. pgdat->node_id);
  4612. }
  4613. #else
  4614. static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
  4615. unsigned long zone_start_pfn, unsigned long zonesize) {}
  4616. #endif /* CONFIG_SPARSEMEM */
  4617. #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
  4618. /* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
  4619. void __paginginit set_pageblock_order(void)
  4620. {
  4621. unsigned int order;
  4622. /* Check that pageblock_nr_pages has not already been setup */
  4623. if (pageblock_order)
  4624. return;
  4625. if (HPAGE_SHIFT > PAGE_SHIFT)
  4626. order = HUGETLB_PAGE_ORDER;
  4627. else
  4628. order = MAX_ORDER - 1;
  4629. /*
  4630. * Assume the largest contiguous order of interest is a huge page.
  4631. * This value may be variable depending on boot parameters on IA64 and
  4632. * powerpc.
  4633. */
  4634. pageblock_order = order;
  4635. }
  4636. #else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
  4637. /*
  4638. * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
  4639. * is unused as pageblock_order is set at compile-time. See
  4640. * include/linux/pageblock-flags.h for the values of pageblock_order based on
  4641. * the kernel config
  4642. */
  4643. void __paginginit set_pageblock_order(void)
  4644. {
  4645. }
  4646. #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
  4647. static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
  4648. unsigned long present_pages)
  4649. {
  4650. unsigned long pages = spanned_pages;
  4651. /*
  4652. * Provide a more accurate estimation if there are holes within
  4653. * the zone and SPARSEMEM is in use. If there are holes within the
  4654. * zone, each populated memory region may cost us one or two extra
  4655. * memmap pages due to alignment because memmap pages for each
  4656. * populated regions may not naturally algined on page boundary.
  4657. * So the (present_pages >> 4) heuristic is a tradeoff for that.
  4658. */
  4659. if (spanned_pages > present_pages + (present_pages >> 4) &&
  4660. IS_ENABLED(CONFIG_SPARSEMEM))
  4661. pages = present_pages;
  4662. return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
  4663. }
  4664. /*
  4665. * Set up the zone data structures:
  4666. * - mark all pages reserved
  4667. * - mark all memory queues empty
  4668. * - clear the memory bitmaps
  4669. *
  4670. * NOTE: pgdat should get zeroed by caller.
  4671. */
  4672. static void __paginginit free_area_init_core(struct pglist_data *pgdat)
  4673. {
  4674. enum zone_type j;
  4675. int nid = pgdat->node_id;
  4676. int ret;
  4677. pgdat_resize_init(pgdat);
  4678. #ifdef CONFIG_NUMA_BALANCING
  4679. spin_lock_init(&pgdat->numabalancing_migrate_lock);
  4680. pgdat->numabalancing_migrate_nr_pages = 0;
  4681. pgdat->numabalancing_migrate_next_window = jiffies;
  4682. #endif
  4683. #ifdef CONFIG_TRANSPARENT_HUGEPAGE
  4684. spin_lock_init(&pgdat->split_queue_lock);
  4685. INIT_LIST_HEAD(&pgdat->split_queue);
  4686. pgdat->split_queue_len = 0;
  4687. #endif
  4688. init_waitqueue_head(&pgdat->kswapd_wait);
  4689. init_waitqueue_head(&pgdat->pfmemalloc_wait);
  4690. #ifdef CONFIG_COMPACTION
  4691. init_waitqueue_head(&pgdat->kcompactd_wait);
  4692. #endif
  4693. pgdat_page_ext_init(pgdat);
  4694. for (j = 0; j < MAX_NR_ZONES; j++) {
  4695. struct zone *zone = pgdat->node_zones + j;
  4696. unsigned long size, realsize, freesize, memmap_pages;
  4697. unsigned long zone_start_pfn = zone->zone_start_pfn;
  4698. size = zone->spanned_pages;
  4699. realsize = freesize = zone->present_pages;
  4700. /*
  4701. * Adjust freesize so that it accounts for how much memory
  4702. * is used by this zone for memmap. This affects the watermark
  4703. * and per-cpu initialisations
  4704. */
  4705. memmap_pages = calc_memmap_size(size, realsize);
  4706. if (!is_highmem_idx(j)) {
  4707. if (freesize >= memmap_pages) {
  4708. freesize -= memmap_pages;
  4709. if (memmap_pages)
  4710. printk(KERN_DEBUG
  4711. " %s zone: %lu pages used for memmap\n",
  4712. zone_names[j], memmap_pages);
  4713. } else
  4714. pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
  4715. zone_names[j], memmap_pages, freesize);
  4716. }
  4717. /* Account for reserved pages */
  4718. if (j == 0 && freesize > dma_reserve) {
  4719. freesize -= dma_reserve;
  4720. printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
  4721. zone_names[0], dma_reserve);
  4722. }
  4723. if (!is_highmem_idx(j))
  4724. nr_kernel_pages += freesize;
  4725. /* Charge for highmem memmap if there are enough kernel pages */
  4726. else if (nr_kernel_pages > memmap_pages * 2)
  4727. nr_kernel_pages -= memmap_pages;
  4728. nr_all_pages += freesize;
  4729. /*
  4730. * Set an approximate value for lowmem here, it will be adjusted
  4731. * when the bootmem allocator frees pages into the buddy system.
  4732. * And all highmem pages will be managed by the buddy system.
  4733. */
  4734. zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
  4735. #ifdef CONFIG_NUMA
  4736. zone->node = nid;
  4737. zone->min_unmapped_pages = (freesize*sysctl_min_unmapped_ratio)
  4738. / 100;
  4739. zone->min_slab_pages = (freesize * sysctl_min_slab_ratio) / 100;
  4740. #endif
  4741. zone->name = zone_names[j];
  4742. spin_lock_init(&zone->lock);
  4743. spin_lock_init(&zone->lru_lock);
  4744. zone_seqlock_init(zone);
  4745. zone->zone_pgdat = pgdat;
  4746. zone_pcp_init(zone);
  4747. /* For bootup, initialized properly in watermark setup */
  4748. mod_zone_page_state(zone, NR_ALLOC_BATCH, zone->managed_pages);
  4749. lruvec_init(&zone->lruvec);
  4750. if (!size)
  4751. continue;
  4752. set_pageblock_order();
  4753. setup_usemap(pgdat, zone, zone_start_pfn, size);
  4754. ret = init_currently_empty_zone(zone, zone_start_pfn, size);
  4755. BUG_ON(ret);
  4756. memmap_init(size, nid, j, zone_start_pfn);
  4757. }
  4758. }
  4759. static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
  4760. {
  4761. unsigned long __maybe_unused start = 0;
  4762. unsigned long __maybe_unused offset = 0;
  4763. /* Skip empty nodes */
  4764. if (!pgdat->node_spanned_pages)
  4765. return;
  4766. #ifdef CONFIG_FLAT_NODE_MEM_MAP
  4767. start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
  4768. offset = pgdat->node_start_pfn - start;
  4769. /* ia64 gets its own node_mem_map, before this, without bootmem */
  4770. if (!pgdat->node_mem_map) {
  4771. unsigned long size, end;
  4772. struct page *map;
  4773. /*
  4774. * The zone's endpoints aren't required to be MAX_ORDER
  4775. * aligned but the node_mem_map endpoints must be in order
  4776. * for the buddy allocator to function correctly.
  4777. */
  4778. end = pgdat_end_pfn(pgdat);
  4779. end = ALIGN(end, MAX_ORDER_NR_PAGES);
  4780. size = (end - start) * sizeof(struct page);
  4781. map = alloc_remap(pgdat->node_id, size);
  4782. if (!map)
  4783. map = memblock_virt_alloc_node_nopanic(size,
  4784. pgdat->node_id);
  4785. pgdat->node_mem_map = map + offset;
  4786. }
  4787. #ifndef CONFIG_NEED_MULTIPLE_NODES
  4788. /*
  4789. * With no DISCONTIG, the global mem_map is just set as node 0's
  4790. */
  4791. if (pgdat == NODE_DATA(0)) {
  4792. mem_map = NODE_DATA(0)->node_mem_map;
  4793. #if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
  4794. if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
  4795. mem_map -= offset;
  4796. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  4797. }
  4798. #endif
  4799. #endif /* CONFIG_FLAT_NODE_MEM_MAP */
  4800. }
  4801. void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
  4802. unsigned long node_start_pfn, unsigned long *zholes_size)
  4803. {
  4804. pg_data_t *pgdat = NODE_DATA(nid);
  4805. unsigned long start_pfn = 0;
  4806. unsigned long end_pfn = 0;
  4807. /* pg_data_t should be reset to zero when it's allocated */
  4808. WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
  4809. reset_deferred_meminit(pgdat);
  4810. pgdat->node_id = nid;
  4811. pgdat->node_start_pfn = node_start_pfn;
  4812. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4813. get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
  4814. pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
  4815. (u64)start_pfn << PAGE_SHIFT,
  4816. end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
  4817. #else
  4818. start_pfn = node_start_pfn;
  4819. #endif
  4820. calculate_node_totalpages(pgdat, start_pfn, end_pfn,
  4821. zones_size, zholes_size);
  4822. alloc_node_mem_map(pgdat);
  4823. #ifdef CONFIG_FLAT_NODE_MEM_MAP
  4824. printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
  4825. nid, (unsigned long)pgdat,
  4826. (unsigned long)pgdat->node_mem_map);
  4827. #endif
  4828. free_area_init_core(pgdat);
  4829. }
  4830. #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
  4831. #if MAX_NUMNODES > 1
  4832. /*
  4833. * Figure out the number of possible node ids.
  4834. */
  4835. void __init setup_nr_node_ids(void)
  4836. {
  4837. unsigned int highest;
  4838. highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
  4839. nr_node_ids = highest + 1;
  4840. }
  4841. #endif
  4842. /**
  4843. * node_map_pfn_alignment - determine the maximum internode alignment
  4844. *
  4845. * This function should be called after node map is populated and sorted.
  4846. * It calculates the maximum power of two alignment which can distinguish
  4847. * all the nodes.
  4848. *
  4849. * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
  4850. * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
  4851. * nodes are shifted by 256MiB, 256MiB. Note that if only the last node is
  4852. * shifted, 1GiB is enough and this function will indicate so.
  4853. *
  4854. * This is used to test whether pfn -> nid mapping of the chosen memory
  4855. * model has fine enough granularity to avoid incorrect mapping for the
  4856. * populated node map.
  4857. *
  4858. * Returns the determined alignment in pfn's. 0 if there is no alignment
  4859. * requirement (single node).
  4860. */
  4861. unsigned long __init node_map_pfn_alignment(void)
  4862. {
  4863. unsigned long accl_mask = 0, last_end = 0;
  4864. unsigned long start, end, mask;
  4865. int last_nid = -1;
  4866. int i, nid;
  4867. for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
  4868. if (!start || last_nid < 0 || last_nid == nid) {
  4869. last_nid = nid;
  4870. last_end = end;
  4871. continue;
  4872. }
  4873. /*
  4874. * Start with a mask granular enough to pin-point to the
  4875. * start pfn and tick off bits one-by-one until it becomes
  4876. * too coarse to separate the current node from the last.
  4877. */
  4878. mask = ~((1 << __ffs(start)) - 1);
  4879. while (mask && last_end <= (start & (mask << 1)))
  4880. mask <<= 1;
  4881. /* accumulate all internode masks */
  4882. accl_mask |= mask;
  4883. }
  4884. /* convert mask to number of pages */
  4885. return ~accl_mask + 1;
  4886. }
  4887. /* Find the lowest pfn for a node */
  4888. static unsigned long __init find_min_pfn_for_node(int nid)
  4889. {
  4890. unsigned long min_pfn = ULONG_MAX;
  4891. unsigned long start_pfn;
  4892. int i;
  4893. for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
  4894. min_pfn = min(min_pfn, start_pfn);
  4895. if (min_pfn == ULONG_MAX) {
  4896. pr_warn("Could not find start_pfn for node %d\n", nid);
  4897. return 0;
  4898. }
  4899. return min_pfn;
  4900. }
  4901. /**
  4902. * find_min_pfn_with_active_regions - Find the minimum PFN registered
  4903. *
  4904. * It returns the minimum PFN based on information provided via
  4905. * memblock_set_node().
  4906. */
  4907. unsigned long __init find_min_pfn_with_active_regions(void)
  4908. {
  4909. return find_min_pfn_for_node(MAX_NUMNODES);
  4910. }
  4911. /*
  4912. * early_calculate_totalpages()
  4913. * Sum pages in active regions for movable zone.
  4914. * Populate N_MEMORY for calculating usable_nodes.
  4915. */
  4916. static unsigned long __init early_calculate_totalpages(void)
  4917. {
  4918. unsigned long totalpages = 0;
  4919. unsigned long start_pfn, end_pfn;
  4920. int i, nid;
  4921. for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
  4922. unsigned long pages = end_pfn - start_pfn;
  4923. totalpages += pages;
  4924. if (pages)
  4925. node_set_state(nid, N_MEMORY);
  4926. }
  4927. return totalpages;
  4928. }
  4929. /*
  4930. * Find the PFN the Movable zone begins in each node. Kernel memory
  4931. * is spread evenly between nodes as long as the nodes have enough
  4932. * memory. When they don't, some nodes will have more kernelcore than
  4933. * others
  4934. */
  4935. static void __init find_zone_movable_pfns_for_nodes(void)
  4936. {
  4937. int i, nid;
  4938. unsigned long usable_startpfn;
  4939. unsigned long kernelcore_node, kernelcore_remaining;
  4940. /* save the state before borrow the nodemask */
  4941. nodemask_t saved_node_state = node_states[N_MEMORY];
  4942. unsigned long totalpages = early_calculate_totalpages();
  4943. int usable_nodes = nodes_weight(node_states[N_MEMORY]);
  4944. struct memblock_region *r;
  4945. /* Need to find movable_zone earlier when movable_node is specified. */
  4946. find_usable_zone_for_movable();
  4947. /*
  4948. * If movable_node is specified, ignore kernelcore and movablecore
  4949. * options.
  4950. */
  4951. if (movable_node_is_enabled()) {
  4952. for_each_memblock(memory, r) {
  4953. if (!memblock_is_hotpluggable(r))
  4954. continue;
  4955. nid = r->nid;
  4956. usable_startpfn = PFN_DOWN(r->base);
  4957. zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
  4958. min(usable_startpfn, zone_movable_pfn[nid]) :
  4959. usable_startpfn;
  4960. }
  4961. goto out2;
  4962. }
  4963. /*
  4964. * If kernelcore=mirror is specified, ignore movablecore option
  4965. */
  4966. if (mirrored_kernelcore) {
  4967. bool mem_below_4gb_not_mirrored = false;
  4968. for_each_memblock(memory, r) {
  4969. if (memblock_is_mirror(r))
  4970. continue;
  4971. nid = r->nid;
  4972. usable_startpfn = memblock_region_memory_base_pfn(r);
  4973. if (usable_startpfn < 0x100000) {
  4974. mem_below_4gb_not_mirrored = true;
  4975. continue;
  4976. }
  4977. zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
  4978. min(usable_startpfn, zone_movable_pfn[nid]) :
  4979. usable_startpfn;
  4980. }
  4981. if (mem_below_4gb_not_mirrored)
  4982. pr_warn("This configuration results in unmirrored kernel memory.");
  4983. goto out2;
  4984. }
  4985. /*
  4986. * If movablecore=nn[KMG] was specified, calculate what size of
  4987. * kernelcore that corresponds so that memory usable for
  4988. * any allocation type is evenly spread. If both kernelcore
  4989. * and movablecore are specified, then the value of kernelcore
  4990. * will be used for required_kernelcore if it's greater than
  4991. * what movablecore would have allowed.
  4992. */
  4993. if (required_movablecore) {
  4994. unsigned long corepages;
  4995. /*
  4996. * Round-up so that ZONE_MOVABLE is at least as large as what
  4997. * was requested by the user
  4998. */
  4999. required_movablecore =
  5000. roundup(required_movablecore, MAX_ORDER_NR_PAGES);
  5001. required_movablecore = min(totalpages, required_movablecore);
  5002. corepages = totalpages - required_movablecore;
  5003. required_kernelcore = max(required_kernelcore, corepages);
  5004. }
  5005. /*
  5006. * If kernelcore was not specified or kernelcore size is larger
  5007. * than totalpages, there is no ZONE_MOVABLE.
  5008. */
  5009. if (!required_kernelcore || required_kernelcore >= totalpages)
  5010. goto out;
  5011. /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
  5012. usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
  5013. restart:
  5014. /* Spread kernelcore memory as evenly as possible throughout nodes */
  5015. kernelcore_node = required_kernelcore / usable_nodes;
  5016. for_each_node_state(nid, N_MEMORY) {
  5017. unsigned long start_pfn, end_pfn;
  5018. /*
  5019. * Recalculate kernelcore_node if the division per node
  5020. * now exceeds what is necessary to satisfy the requested
  5021. * amount of memory for the kernel
  5022. */
  5023. if (required_kernelcore < kernelcore_node)
  5024. kernelcore_node = required_kernelcore / usable_nodes;
  5025. /*
  5026. * As the map is walked, we track how much memory is usable
  5027. * by the kernel using kernelcore_remaining. When it is
  5028. * 0, the rest of the node is usable by ZONE_MOVABLE
  5029. */
  5030. kernelcore_remaining = kernelcore_node;
  5031. /* Go through each range of PFNs within this node */
  5032. for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
  5033. unsigned long size_pages;
  5034. start_pfn = max(start_pfn, zone_movable_pfn[nid]);
  5035. if (start_pfn >= end_pfn)
  5036. continue;
  5037. /* Account for what is only usable for kernelcore */
  5038. if (start_pfn < usable_startpfn) {
  5039. unsigned long kernel_pages;
  5040. kernel_pages = min(end_pfn, usable_startpfn)
  5041. - start_pfn;
  5042. kernelcore_remaining -= min(kernel_pages,
  5043. kernelcore_remaining);
  5044. required_kernelcore -= min(kernel_pages,
  5045. required_kernelcore);
  5046. /* Continue if range is now fully accounted */
  5047. if (end_pfn <= usable_startpfn) {
  5048. /*
  5049. * Push zone_movable_pfn to the end so
  5050. * that if we have to rebalance
  5051. * kernelcore across nodes, we will
  5052. * not double account here
  5053. */
  5054. zone_movable_pfn[nid] = end_pfn;
  5055. continue;
  5056. }
  5057. start_pfn = usable_startpfn;
  5058. }
  5059. /*
  5060. * The usable PFN range for ZONE_MOVABLE is from
  5061. * start_pfn->end_pfn. Calculate size_pages as the
  5062. * number of pages used as kernelcore
  5063. */
  5064. size_pages = end_pfn - start_pfn;
  5065. if (size_pages > kernelcore_remaining)
  5066. size_pages = kernelcore_remaining;
  5067. zone_movable_pfn[nid] = start_pfn + size_pages;
  5068. /*
  5069. * Some kernelcore has been met, update counts and
  5070. * break if the kernelcore for this node has been
  5071. * satisfied
  5072. */
  5073. required_kernelcore -= min(required_kernelcore,
  5074. size_pages);
  5075. kernelcore_remaining -= size_pages;
  5076. if (!kernelcore_remaining)
  5077. break;
  5078. }
  5079. }
  5080. /*
  5081. * If there is still required_kernelcore, we do another pass with one
  5082. * less node in the count. This will push zone_movable_pfn[nid] further
  5083. * along on the nodes that still have memory until kernelcore is
  5084. * satisfied
  5085. */
  5086. usable_nodes--;
  5087. if (usable_nodes && required_kernelcore > usable_nodes)
  5088. goto restart;
  5089. out2:
  5090. /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
  5091. for (nid = 0; nid < MAX_NUMNODES; nid++)
  5092. zone_movable_pfn[nid] =
  5093. roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
  5094. out:
  5095. /* restore the node_state */
  5096. node_states[N_MEMORY] = saved_node_state;
  5097. }
  5098. /* Any regular or high memory on that node ? */
  5099. static void check_for_memory(pg_data_t *pgdat, int nid)
  5100. {
  5101. enum zone_type zone_type;
  5102. if (N_MEMORY == N_NORMAL_MEMORY)
  5103. return;
  5104. for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
  5105. struct zone *zone = &pgdat->node_zones[zone_type];
  5106. if (populated_zone(zone)) {
  5107. node_set_state(nid, N_HIGH_MEMORY);
  5108. if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
  5109. zone_type <= ZONE_NORMAL)
  5110. node_set_state(nid, N_NORMAL_MEMORY);
  5111. break;
  5112. }
  5113. }
  5114. }
  5115. /**
  5116. * free_area_init_nodes - Initialise all pg_data_t and zone data
  5117. * @max_zone_pfn: an array of max PFNs for each zone
  5118. *
  5119. * This will call free_area_init_node() for each active node in the system.
  5120. * Using the page ranges provided by memblock_set_node(), the size of each
  5121. * zone in each node and their holes is calculated. If the maximum PFN
  5122. * between two adjacent zones match, it is assumed that the zone is empty.
  5123. * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
  5124. * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
  5125. * starts where the previous one ended. For example, ZONE_DMA32 starts
  5126. * at arch_max_dma_pfn.
  5127. */
  5128. void __init free_area_init_nodes(unsigned long *max_zone_pfn)
  5129. {
  5130. unsigned long start_pfn, end_pfn;
  5131. int i, nid;
  5132. /* Record where the zone boundaries are */
  5133. memset(arch_zone_lowest_possible_pfn, 0,
  5134. sizeof(arch_zone_lowest_possible_pfn));
  5135. memset(arch_zone_highest_possible_pfn, 0,
  5136. sizeof(arch_zone_highest_possible_pfn));
  5137. arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
  5138. arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
  5139. for (i = 1; i < MAX_NR_ZONES; i++) {
  5140. if (i == ZONE_MOVABLE)
  5141. continue;
  5142. arch_zone_lowest_possible_pfn[i] =
  5143. arch_zone_highest_possible_pfn[i-1];
  5144. arch_zone_highest_possible_pfn[i] =
  5145. max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
  5146. }
  5147. arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
  5148. arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
  5149. /* Find the PFNs that ZONE_MOVABLE begins at in each node */
  5150. memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
  5151. find_zone_movable_pfns_for_nodes();
  5152. /* Print out the zone ranges */
  5153. pr_info("Zone ranges:\n");
  5154. for (i = 0; i < MAX_NR_ZONES; i++) {
  5155. if (i == ZONE_MOVABLE)
  5156. continue;
  5157. pr_info(" %-8s ", zone_names[i]);
  5158. if (arch_zone_lowest_possible_pfn[i] ==
  5159. arch_zone_highest_possible_pfn[i])
  5160. pr_cont("empty\n");
  5161. else
  5162. pr_cont("[mem %#018Lx-%#018Lx]\n",
  5163. (u64)arch_zone_lowest_possible_pfn[i]
  5164. << PAGE_SHIFT,
  5165. ((u64)arch_zone_highest_possible_pfn[i]
  5166. << PAGE_SHIFT) - 1);
  5167. }
  5168. /* Print out the PFNs ZONE_MOVABLE begins at in each node */
  5169. pr_info("Movable zone start for each node\n");
  5170. for (i = 0; i < MAX_NUMNODES; i++) {
  5171. if (zone_movable_pfn[i])
  5172. pr_info(" Node %d: %#018Lx\n", i,
  5173. (u64)zone_movable_pfn[i] << PAGE_SHIFT);
  5174. }
  5175. /* Print out the early node map */
  5176. pr_info("Early memory node ranges\n");
  5177. for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
  5178. pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
  5179. (u64)start_pfn << PAGE_SHIFT,
  5180. ((u64)end_pfn << PAGE_SHIFT) - 1);
  5181. /* Initialise every node */
  5182. mminit_verify_pageflags_layout();
  5183. setup_nr_node_ids();
  5184. for_each_online_node(nid) {
  5185. pg_data_t *pgdat = NODE_DATA(nid);
  5186. free_area_init_node(nid, NULL,
  5187. find_min_pfn_for_node(nid), NULL);
  5188. /* Any memory on that node */
  5189. if (pgdat->node_present_pages)
  5190. node_set_state(nid, N_MEMORY);
  5191. check_for_memory(pgdat, nid);
  5192. }
  5193. }
  5194. static int __init cmdline_parse_core(char *p, unsigned long *core)
  5195. {
  5196. unsigned long long coremem;
  5197. if (!p)
  5198. return -EINVAL;
  5199. coremem = memparse(p, &p);
  5200. *core = coremem >> PAGE_SHIFT;
  5201. /* Paranoid check that UL is enough for the coremem value */
  5202. WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);
  5203. return 0;
  5204. }
  5205. /*
  5206. * kernelcore=size sets the amount of memory for use for allocations that
  5207. * cannot be reclaimed or migrated.
  5208. */
  5209. static int __init cmdline_parse_kernelcore(char *p)
  5210. {
  5211. /* parse kernelcore=mirror */
  5212. if (parse_option_str(p, "mirror")) {
  5213. mirrored_kernelcore = true;
  5214. return 0;
  5215. }
  5216. return cmdline_parse_core(p, &required_kernelcore);
  5217. }
  5218. /*
  5219. * movablecore=size sets the amount of memory for use for allocations that
  5220. * can be reclaimed or migrated.
  5221. */
  5222. static int __init cmdline_parse_movablecore(char *p)
  5223. {
  5224. return cmdline_parse_core(p, &required_movablecore);
  5225. }
  5226. early_param("kernelcore", cmdline_parse_kernelcore);
  5227. early_param("movablecore", cmdline_parse_movablecore);
  5228. #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
  5229. void adjust_managed_page_count(struct page *page, long count)
  5230. {
  5231. spin_lock(&managed_page_count_lock);
  5232. page_zone(page)->managed_pages += count;
  5233. totalram_pages += count;
  5234. #ifdef CONFIG_HIGHMEM
  5235. if (PageHighMem(page))
  5236. totalhigh_pages += count;
  5237. #endif
  5238. spin_unlock(&managed_page_count_lock);
  5239. }
  5240. EXPORT_SYMBOL(adjust_managed_page_count);
  5241. unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
  5242. {
  5243. void *pos;
  5244. unsigned long pages = 0;
  5245. start = (void *)PAGE_ALIGN((unsigned long)start);
  5246. end = (void *)((unsigned long)end & PAGE_MASK);
  5247. for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
  5248. if ((unsigned int)poison <= 0xFF)
  5249. memset(pos, poison, PAGE_SIZE);
  5250. free_reserved_page(virt_to_page(pos));
  5251. }
  5252. if (pages && s)
  5253. pr_info("Freeing %s memory: %ldK (%p - %p)\n",
  5254. s, pages << (PAGE_SHIFT - 10), start, end);
  5255. return pages;
  5256. }
  5257. EXPORT_SYMBOL(free_reserved_area);
  5258. #ifdef CONFIG_HIGHMEM
  5259. void free_highmem_page(struct page *page)
  5260. {
  5261. __free_reserved_page(page);
  5262. totalram_pages++;
  5263. page_zone(page)->managed_pages++;
  5264. totalhigh_pages++;
  5265. }
  5266. #endif
  5267. void __init mem_init_print_info(const char *str)
  5268. {
  5269. unsigned long physpages, codesize, datasize, rosize, bss_size;
  5270. unsigned long init_code_size, init_data_size;
  5271. physpages = get_num_physpages();
  5272. codesize = _etext - _stext;
  5273. datasize = _edata - _sdata;
  5274. rosize = __end_rodata - __start_rodata;
  5275. bss_size = __bss_stop - __bss_start;
  5276. init_data_size = __init_end - __init_begin;
  5277. init_code_size = _einittext - _sinittext;
  5278. /*
  5279. * Detect special cases and adjust section sizes accordingly:
  5280. * 1) .init.* may be embedded into .data sections
  5281. * 2) .init.text.* may be out of [__init_begin, __init_end],
  5282. * please refer to arch/tile/kernel/vmlinux.lds.S.
  5283. * 3) .rodata.* may be embedded into .text or .data sections.
  5284. */
  5285. #define adj_init_size(start, end, size, pos, adj) \
  5286. do { \
  5287. if (start <= pos && pos < end && size > adj) \
  5288. size -= adj; \
  5289. } while (0)
  5290. adj_init_size(__init_begin, __init_end, init_data_size,
  5291. _sinittext, init_code_size);
  5292. adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
  5293. adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
  5294. adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
  5295. adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);
  5296. #undef adj_init_size
  5297. pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
  5298. #ifdef CONFIG_HIGHMEM
  5299. ", %luK highmem"
  5300. #endif
  5301. "%s%s)\n",
  5302. nr_free_pages() << (PAGE_SHIFT - 10),
  5303. physpages << (PAGE_SHIFT - 10),
  5304. codesize >> 10, datasize >> 10, rosize >> 10,
  5305. (init_data_size + init_code_size) >> 10, bss_size >> 10,
  5306. (physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
  5307. totalcma_pages << (PAGE_SHIFT - 10),
  5308. #ifdef CONFIG_HIGHMEM
  5309. totalhigh_pages << (PAGE_SHIFT - 10),
  5310. #endif
  5311. str ? ", " : "", str ? str : "");
  5312. }
  5313. /**
  5314. * set_dma_reserve - set the specified number of pages reserved in the first zone
  5315. * @new_dma_reserve: The number of pages to mark reserved
  5316. *
  5317. * The per-cpu batchsize and zone watermarks are determined by managed_pages.
  5318. * In the DMA zone, a significant percentage may be consumed by kernel image
  5319. * and other unfreeable allocations which can skew the watermarks badly. This
  5320. * function may optionally be used to account for unfreeable pages in the
  5321. * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
  5322. * smaller per-cpu batchsize.
  5323. */
  5324. void __init set_dma_reserve(unsigned long new_dma_reserve)
  5325. {
  5326. dma_reserve = new_dma_reserve;
  5327. }
  5328. void __init free_area_init(unsigned long *zones_size)
  5329. {
  5330. free_area_init_node(0, zones_size,
  5331. __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
  5332. }
  5333. static int page_alloc_cpu_notify(struct notifier_block *self,
  5334. unsigned long action, void *hcpu)
  5335. {
  5336. int cpu = (unsigned long)hcpu;
  5337. if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
  5338. lru_add_drain_cpu(cpu);
  5339. drain_pages(cpu);
  5340. /*
  5341. * Spill the event counters of the dead processor
  5342. * into the current processors event counters.
  5343. * This artificially elevates the count of the current
  5344. * processor.
  5345. */
  5346. vm_events_fold_cpu(cpu);
  5347. /*
  5348. * Zero the differential counters of the dead processor
  5349. * so that the vm statistics are consistent.
  5350. *
  5351. * This is only okay since the processor is dead and cannot
  5352. * race with what we are doing.
  5353. */
  5354. cpu_vm_stats_fold(cpu);
  5355. }
  5356. return NOTIFY_OK;
  5357. }
  5358. void __init page_alloc_init(void)
  5359. {
  5360. hotcpu_notifier(page_alloc_cpu_notify, 0);
  5361. }
  5362. /*
  5363. * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
  5364. * or min_free_kbytes changes.
  5365. */
  5366. static void calculate_totalreserve_pages(void)
  5367. {
  5368. struct pglist_data *pgdat;
  5369. unsigned long reserve_pages = 0;
  5370. enum zone_type i, j;
  5371. for_each_online_pgdat(pgdat) {
  5372. for (i = 0; i < MAX_NR_ZONES; i++) {
  5373. struct zone *zone = pgdat->node_zones + i;
  5374. long max = 0;
  5375. /* Find valid and maximum lowmem_reserve in the zone */
  5376. for (j = i; j < MAX_NR_ZONES; j++) {
  5377. if (zone->lowmem_reserve[j] > max)
  5378. max = zone->lowmem_reserve[j];
  5379. }
  5380. /* we treat the high watermark as reserved pages. */
  5381. max += high_wmark_pages(zone);
  5382. if (max > zone->managed_pages)
  5383. max = zone->managed_pages;
  5384. zone->totalreserve_pages = max;
  5385. reserve_pages += max;
  5386. }
  5387. }
  5388. totalreserve_pages = reserve_pages;
  5389. }
  5390. /*
  5391. * setup_per_zone_lowmem_reserve - called whenever
  5392. * sysctl_lowmem_reserve_ratio changes. Ensures that each zone
  5393. * has a correct pages reserved value, so an adequate number of
  5394. * pages are left in the zone after a successful __alloc_pages().
  5395. */
  5396. static void setup_per_zone_lowmem_reserve(void)
  5397. {
  5398. struct pglist_data *pgdat;
  5399. enum zone_type j, idx;
  5400. for_each_online_pgdat(pgdat) {
  5401. for (j = 0; j < MAX_NR_ZONES; j++) {
  5402. struct zone *zone = pgdat->node_zones + j;
  5403. unsigned long managed_pages = zone->managed_pages;
  5404. zone->lowmem_reserve[j] = 0;
  5405. idx = j;
  5406. while (idx) {
  5407. struct zone *lower_zone;
  5408. idx--;
  5409. if (sysctl_lowmem_reserve_ratio[idx] < 1)
  5410. sysctl_lowmem_reserve_ratio[idx] = 1;
  5411. lower_zone = pgdat->node_zones + idx;
  5412. lower_zone->lowmem_reserve[j] = managed_pages /
  5413. sysctl_lowmem_reserve_ratio[idx];
  5414. managed_pages += lower_zone->managed_pages;
  5415. }
  5416. }
  5417. }
  5418. /* update totalreserve_pages */
  5419. calculate_totalreserve_pages();
  5420. }
  5421. static void __setup_per_zone_wmarks(void)
  5422. {
  5423. unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
  5424. unsigned long lowmem_pages = 0;
  5425. struct zone *zone;
  5426. unsigned long flags;
  5427. /* Calculate total number of !ZONE_HIGHMEM pages */
  5428. for_each_zone(zone) {
  5429. if (!is_highmem(zone))
  5430. lowmem_pages += zone->managed_pages;
  5431. }
  5432. for_each_zone(zone) {
  5433. u64 tmp;
  5434. spin_lock_irqsave(&zone->lock, flags);
  5435. tmp = (u64)pages_min * zone->managed_pages;
  5436. do_div(tmp, lowmem_pages);
  5437. if (is_highmem(zone)) {
  5438. /*
  5439. * __GFP_HIGH and PF_MEMALLOC allocations usually don't
  5440. * need highmem pages, so cap pages_min to a small
  5441. * value here.
  5442. *
  5443. * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
  5444. * deltas control asynch page reclaim, and so should
  5445. * not be capped for highmem.
  5446. */
  5447. unsigned long min_pages;
  5448. min_pages = zone->managed_pages / 1024;
  5449. min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
  5450. zone->watermark[WMARK_MIN] = min_pages;
  5451. } else {
  5452. /*
  5453. * If it's a lowmem zone, reserve a number of pages
  5454. * proportionate to the zone's size.
  5455. */
  5456. zone->watermark[WMARK_MIN] = tmp;
  5457. }
  5458. /*
  5459. * Set the kswapd watermarks distance according to the
  5460. * scale factor in proportion to available memory, but
  5461. * ensure a minimum size on small systems.
  5462. */
  5463. tmp = max_t(u64, tmp >> 2,
  5464. mult_frac(zone->managed_pages,
  5465. watermark_scale_factor, 10000));
  5466. zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp;
  5467. zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;
  5468. __mod_zone_page_state(zone, NR_ALLOC_BATCH,
  5469. high_wmark_pages(zone) - low_wmark_pages(zone) -
  5470. atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]));
  5471. spin_unlock_irqrestore(&zone->lock, flags);
  5472. }
  5473. /* update totalreserve_pages */
  5474. calculate_totalreserve_pages();
  5475. }
  5476. /**
  5477. * setup_per_zone_wmarks - called when min_free_kbytes changes
  5478. * or when memory is hot-{added|removed}
  5479. *
  5480. * Ensures that the watermark[min,low,high] values for each zone are set
  5481. * correctly with respect to min_free_kbytes.
  5482. */
  5483. void setup_per_zone_wmarks(void)
  5484. {
  5485. mutex_lock(&zonelists_mutex);
  5486. __setup_per_zone_wmarks();
  5487. mutex_unlock(&zonelists_mutex);
  5488. }
  5489. /*
  5490. * The inactive anon list should be small enough that the VM never has to
  5491. * do too much work, but large enough that each inactive page has a chance
  5492. * to be referenced again before it is swapped out.
  5493. *
  5494. * The inactive_anon ratio is the target ratio of ACTIVE_ANON to
  5495. * INACTIVE_ANON pages on this zone's LRU, maintained by the
  5496. * pageout code. A zone->inactive_ratio of 3 means 3:1 or 25% of
  5497. * the anonymous pages are kept on the inactive list.
  5498. *
  5499. * total target max
  5500. * memory ratio inactive anon
  5501. * -------------------------------------
  5502. * 10MB 1 5MB
  5503. * 100MB 1 50MB
  5504. * 1GB 3 250MB
  5505. * 10GB 10 0.9GB
  5506. * 100GB 31 3GB
  5507. * 1TB 101 10GB
  5508. * 10TB 320 32GB
  5509. */
  5510. static void __meminit calculate_zone_inactive_ratio(struct zone *zone)
  5511. {
  5512. unsigned int gb, ratio;
  5513. /* Zone size in gigabytes */
  5514. gb = zone->managed_pages >> (30 - PAGE_SHIFT);
  5515. if (gb)
  5516. ratio = int_sqrt(10 * gb);
  5517. else
  5518. ratio = 1;
  5519. zone->inactive_ratio = ratio;
  5520. }
  5521. static void __meminit setup_per_zone_inactive_ratio(void)
  5522. {
  5523. struct zone *zone;
  5524. for_each_zone(zone)
  5525. calculate_zone_inactive_ratio(zone);
  5526. }
  5527. /*
  5528. * Initialise min_free_kbytes.
  5529. *
  5530. * For small machines we want it small (128k min). For large machines
  5531. * we want it large (64MB max). But it is not linear, because network
  5532. * bandwidth does not increase linearly with machine size. We use
  5533. *
  5534. * min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
  5535. * min_free_kbytes = sqrt(lowmem_kbytes * 16)
  5536. *
  5537. * which yields
  5538. *
  5539. * 16MB: 512k
  5540. * 32MB: 724k
  5541. * 64MB: 1024k
  5542. * 128MB: 1448k
  5543. * 256MB: 2048k
  5544. * 512MB: 2896k
  5545. * 1024MB: 4096k
  5546. * 2048MB: 5792k
  5547. * 4096MB: 8192k
  5548. * 8192MB: 11584k
  5549. * 16384MB: 16384k
  5550. */
  5551. int __meminit init_per_zone_wmark_min(void)
  5552. {
  5553. unsigned long lowmem_kbytes;
  5554. int new_min_free_kbytes;
  5555. lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
  5556. new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);
  5557. if (new_min_free_kbytes > user_min_free_kbytes) {
  5558. min_free_kbytes = new_min_free_kbytes;
  5559. if (min_free_kbytes < 128)
  5560. min_free_kbytes = 128;
  5561. if (min_free_kbytes > 65536)
  5562. min_free_kbytes = 65536;
  5563. } else {
  5564. pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
  5565. new_min_free_kbytes, user_min_free_kbytes);
  5566. }
  5567. setup_per_zone_wmarks();
  5568. refresh_zone_stat_thresholds();
  5569. setup_per_zone_lowmem_reserve();
  5570. setup_per_zone_inactive_ratio();
  5571. return 0;
  5572. }
  5573. module_init(init_per_zone_wmark_min)
  5574. /*
  5575. * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
  5576. * that we can call two helper functions whenever min_free_kbytes
  5577. * changes.
  5578. */
  5579. int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
  5580. void __user *buffer, size_t *length, loff_t *ppos)
  5581. {
  5582. int rc;
  5583. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5584. if (rc)
  5585. return rc;
  5586. if (write) {
  5587. user_min_free_kbytes = min_free_kbytes;
  5588. setup_per_zone_wmarks();
  5589. }
  5590. return 0;
  5591. }
  5592. int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
  5593. void __user *buffer, size_t *length, loff_t *ppos)
  5594. {
  5595. int rc;
  5596. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5597. if (rc)
  5598. return rc;
  5599. if (write)
  5600. setup_per_zone_wmarks();
  5601. return 0;
  5602. }
  5603. #ifdef CONFIG_NUMA
  5604. int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
  5605. void __user *buffer, size_t *length, loff_t *ppos)
  5606. {
  5607. struct zone *zone;
  5608. int rc;
  5609. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5610. if (rc)
  5611. return rc;
  5612. for_each_zone(zone)
  5613. zone->min_unmapped_pages = (zone->managed_pages *
  5614. sysctl_min_unmapped_ratio) / 100;
  5615. return 0;
  5616. }
  5617. int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
  5618. void __user *buffer, size_t *length, loff_t *ppos)
  5619. {
  5620. struct zone *zone;
  5621. int rc;
  5622. rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5623. if (rc)
  5624. return rc;
  5625. for_each_zone(zone)
  5626. zone->min_slab_pages = (zone->managed_pages *
  5627. sysctl_min_slab_ratio) / 100;
  5628. return 0;
  5629. }
  5630. #endif
  5631. /*
  5632. * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
  5633. * proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
  5634. * whenever sysctl_lowmem_reserve_ratio changes.
  5635. *
  5636. * The reserve ratio obviously has absolutely no relation with the
  5637. * minimum watermarks. The lowmem reserve ratio can only make sense
  5638. * if in function of the boot time zone sizes.
  5639. */
  5640. int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
  5641. void __user *buffer, size_t *length, loff_t *ppos)
  5642. {
  5643. proc_dointvec_minmax(table, write, buffer, length, ppos);
  5644. setup_per_zone_lowmem_reserve();
  5645. return 0;
  5646. }
  5647. /*
  5648. * percpu_pagelist_fraction - changes the pcp->high for each zone on each
  5649. * cpu. It is the fraction of total pages in each zone that a hot per cpu
  5650. * pagelist can have before it gets flushed back to buddy allocator.
  5651. */
  5652. int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
  5653. void __user *buffer, size_t *length, loff_t *ppos)
  5654. {
  5655. struct zone *zone;
  5656. int old_percpu_pagelist_fraction;
  5657. int ret;
  5658. mutex_lock(&pcp_batch_high_lock);
  5659. old_percpu_pagelist_fraction = percpu_pagelist_fraction;
  5660. ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
  5661. if (!write || ret < 0)
  5662. goto out;
  5663. /* Sanity checking to avoid pcp imbalance */
  5664. if (percpu_pagelist_fraction &&
  5665. percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
  5666. percpu_pagelist_fraction = old_percpu_pagelist_fraction;
  5667. ret = -EINVAL;
  5668. goto out;
  5669. }
  5670. /* No change? */
  5671. if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
  5672. goto out;
  5673. for_each_populated_zone(zone) {
  5674. unsigned int cpu;
  5675. for_each_possible_cpu(cpu)
  5676. pageset_set_high_and_batch(zone,
  5677. per_cpu_ptr(zone->pageset, cpu));
  5678. }
  5679. out:
  5680. mutex_unlock(&pcp_batch_high_lock);
  5681. return ret;
  5682. }
  5683. #ifdef CONFIG_NUMA
  5684. int hashdist = HASHDIST_DEFAULT;
  5685. static int __init set_hashdist(char *str)
  5686. {
  5687. if (!str)
  5688. return 0;
  5689. hashdist = simple_strtoul(str, &str, 0);
  5690. return 1;
  5691. }
  5692. __setup("hashdist=", set_hashdist);
  5693. #endif
  5694. /*
  5695. * allocate a large system hash table from bootmem
  5696. * - it is assumed that the hash table must contain an exact power-of-2
  5697. * quantity of entries
  5698. * - limit is the number of hash buckets, not the total allocation size
  5699. */
  5700. void *__init alloc_large_system_hash(const char *tablename,
  5701. unsigned long bucketsize,
  5702. unsigned long numentries,
  5703. int scale,
  5704. int flags,
  5705. unsigned int *_hash_shift,
  5706. unsigned int *_hash_mask,
  5707. unsigned long low_limit,
  5708. unsigned long high_limit)
  5709. {
  5710. unsigned long long max = high_limit;
  5711. unsigned long log2qty, size;
  5712. void *table = NULL;
  5713. /* allow the kernel cmdline to have a say */
  5714. if (!numentries) {
  5715. /* round applicable memory size up to nearest megabyte */
  5716. numentries = nr_kernel_pages;
  5717. /* It isn't necessary when PAGE_SIZE >= 1MB */
  5718. if (PAGE_SHIFT < 20)
  5719. numentries = round_up(numentries, (1<<20)/PAGE_SIZE);
  5720. /* limit to 1 bucket per 2^scale bytes of low memory */
  5721. if (scale > PAGE_SHIFT)
  5722. numentries >>= (scale - PAGE_SHIFT);
  5723. else
  5724. numentries <<= (PAGE_SHIFT - scale);
  5725. /* Make sure we've got at least a 0-order allocation.. */
  5726. if (unlikely(flags & HASH_SMALL)) {
  5727. /* Makes no sense without HASH_EARLY */
  5728. WARN_ON(!(flags & HASH_EARLY));
  5729. if (!(numentries >> *_hash_shift)) {
  5730. numentries = 1UL << *_hash_shift;
  5731. BUG_ON(!numentries);
  5732. }
  5733. } else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
  5734. numentries = PAGE_SIZE / bucketsize;
  5735. }
  5736. numentries = roundup_pow_of_two(numentries);
  5737. /* limit allocation size to 1/16 total memory by default */
  5738. if (max == 0) {
  5739. max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
  5740. do_div(max, bucketsize);
  5741. }
  5742. max = min(max, 0x80000000ULL);
  5743. if (numentries < low_limit)
  5744. numentries = low_limit;
  5745. if (numentries > max)
  5746. numentries = max;
  5747. log2qty = ilog2(numentries);
  5748. do {
  5749. size = bucketsize << log2qty;
  5750. if (flags & HASH_EARLY)
  5751. table = memblock_virt_alloc_nopanic(size, 0);
  5752. else if (hashdist)
  5753. table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL);
  5754. else {
  5755. /*
  5756. * If bucketsize is not a power-of-two, we may free
  5757. * some pages at the end of hash table which
  5758. * alloc_pages_exact() automatically does
  5759. */
  5760. if (get_order(size) < MAX_ORDER) {
  5761. table = alloc_pages_exact(size, GFP_ATOMIC);
  5762. kmemleak_alloc(table, size, 1, GFP_ATOMIC);
  5763. }
  5764. }
  5765. } while (!table && size > PAGE_SIZE && --log2qty);
  5766. if (!table)
  5767. panic("Failed to allocate %s hash table\n", tablename);
  5768. pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
  5769. tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);
  5770. if (_hash_shift)
  5771. *_hash_shift = log2qty;
  5772. if (_hash_mask)
  5773. *_hash_mask = (1 << log2qty) - 1;
  5774. return table;
  5775. }
  5776. /* Return a pointer to the bitmap storing bits affecting a block of pages */
  5777. static inline unsigned long *get_pageblock_bitmap(struct zone *zone,
  5778. unsigned long pfn)
  5779. {
  5780. #ifdef CONFIG_SPARSEMEM
  5781. return __pfn_to_section(pfn)->pageblock_flags;
  5782. #else
  5783. return zone->pageblock_flags;
  5784. #endif /* CONFIG_SPARSEMEM */
  5785. }
  5786. static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
  5787. {
  5788. #ifdef CONFIG_SPARSEMEM
  5789. pfn &= (PAGES_PER_SECTION-1);
  5790. return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
  5791. #else
  5792. pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
  5793. return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
  5794. #endif /* CONFIG_SPARSEMEM */
  5795. }
  5796. /**
  5797. * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
  5798. * @page: The page within the block of interest
  5799. * @pfn: The target page frame number
  5800. * @end_bitidx: The last bit of interest to retrieve
  5801. * @mask: mask of bits that the caller is interested in
  5802. *
  5803. * Return: pageblock_bits flags
  5804. */
  5805. unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
  5806. unsigned long end_bitidx,
  5807. unsigned long mask)
  5808. {
  5809. struct zone *zone;
  5810. unsigned long *bitmap;
  5811. unsigned long bitidx, word_bitidx;
  5812. unsigned long word;
  5813. zone = page_zone(page);
  5814. bitmap = get_pageblock_bitmap(zone, pfn);
  5815. bitidx = pfn_to_bitidx(zone, pfn);
  5816. word_bitidx = bitidx / BITS_PER_LONG;
  5817. bitidx &= (BITS_PER_LONG-1);
  5818. word = bitmap[word_bitidx];
  5819. bitidx += end_bitidx;
  5820. return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
  5821. }
  5822. /**
  5823. * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
  5824. * @page: The page within the block of interest
  5825. * @flags: The flags to set
  5826. * @pfn: The target page frame number
  5827. * @end_bitidx: The last bit of interest
  5828. * @mask: mask of bits that the caller is interested in
  5829. */
  5830. void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
  5831. unsigned long pfn,
  5832. unsigned long end_bitidx,
  5833. unsigned long mask)
  5834. {
  5835. struct zone *zone;
  5836. unsigned long *bitmap;
  5837. unsigned long bitidx, word_bitidx;
  5838. unsigned long old_word, word;
  5839. BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
  5840. zone = page_zone(page);
  5841. bitmap = get_pageblock_bitmap(zone, pfn);
  5842. bitidx = pfn_to_bitidx(zone, pfn);
  5843. word_bitidx = bitidx / BITS_PER_LONG;
  5844. bitidx &= (BITS_PER_LONG-1);
  5845. VM_BUG_ON_PAGE(!zone_spans_pfn(zone, pfn), page);
  5846. bitidx += end_bitidx;
  5847. mask <<= (BITS_PER_LONG - bitidx - 1);
  5848. flags <<= (BITS_PER_LONG - bitidx - 1);
  5849. word = READ_ONCE(bitmap[word_bitidx]);
  5850. for (;;) {
  5851. old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
  5852. if (word == old_word)
  5853. break;
  5854. word = old_word;
  5855. }
  5856. }
  5857. /*
  5858. * This function checks whether pageblock includes unmovable pages or not.
  5859. * If @count is not zero, it is okay to include less @count unmovable pages
  5860. *
  5861. * PageLRU check without isolation or lru_lock could race so that
  5862. * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
  5863. * expect this function should be exact.
  5864. */
  5865. bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
  5866. bool skip_hwpoisoned_pages)
  5867. {
  5868. unsigned long pfn, iter, found;
  5869. int mt;
  5870. /*
  5871. * For avoiding noise data, lru_add_drain_all() should be called
  5872. * If ZONE_MOVABLE, the zone never contains unmovable pages
  5873. */
  5874. if (zone_idx(zone) == ZONE_MOVABLE)
  5875. return false;
  5876. mt = get_pageblock_migratetype(page);
  5877. if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
  5878. return false;
  5879. pfn = page_to_pfn(page);
  5880. for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
  5881. unsigned long check = pfn + iter;
  5882. if (!pfn_valid_within(check))
  5883. continue;
  5884. page = pfn_to_page(check);
  5885. /*
  5886. * Hugepages are not in LRU lists, but they're movable.
  5887. * We need not scan over tail pages bacause we don't
  5888. * handle each tail page individually in migration.
  5889. */
  5890. if (PageHuge(page)) {
  5891. iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
  5892. continue;
  5893. }
  5894. /*
  5895. * We can't use page_count without pin a page
  5896. * because another CPU can free compound page.
  5897. * This check already skips compound tails of THP
  5898. * because their page->_count is zero at all time.
  5899. */
  5900. if (!page_ref_count(page)) {
  5901. if (PageBuddy(page))
  5902. iter += (1 << page_order(page)) - 1;
  5903. continue;
  5904. }
  5905. /*
  5906. * The HWPoisoned page may be not in buddy system, and
  5907. * page_count() is not 0.
  5908. */
  5909. if (skip_hwpoisoned_pages && PageHWPoison(page))
  5910. continue;
  5911. if (!PageLRU(page))
  5912. found++;
  5913. /*
  5914. * If there are RECLAIMABLE pages, we need to check
  5915. * it. But now, memory offline itself doesn't call
  5916. * shrink_node_slabs() and it still to be fixed.
  5917. */
  5918. /*
  5919. * If the page is not RAM, page_count()should be 0.
  5920. * we don't need more check. This is an _used_ not-movable page.
  5921. *
  5922. * The problematic thing here is PG_reserved pages. PG_reserved
  5923. * is set to both of a memory hole page and a _used_ kernel
  5924. * page at boot.
  5925. */
  5926. if (found > count)
  5927. return true;
  5928. }
  5929. return false;
  5930. }
  5931. bool is_pageblock_removable_nolock(struct page *page)
  5932. {
  5933. struct zone *zone;
  5934. unsigned long pfn;
  5935. /*
  5936. * We have to be careful here because we are iterating over memory
  5937. * sections which are not zone aware so we might end up outside of
  5938. * the zone but still within the section.
  5939. * We have to take care about the node as well. If the node is offline
  5940. * its NODE_DATA will be NULL - see page_zone.
  5941. */
  5942. if (!node_online(page_to_nid(page)))
  5943. return false;
  5944. zone = page_zone(page);
  5945. pfn = page_to_pfn(page);
  5946. if (!zone_spans_pfn(zone, pfn))
  5947. return false;
  5948. return !has_unmovable_pages(zone, page, 0, true);
  5949. }
  5950. #if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
  5951. static unsigned long pfn_max_align_down(unsigned long pfn)
  5952. {
  5953. return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
  5954. pageblock_nr_pages) - 1);
  5955. }
  5956. static unsigned long pfn_max_align_up(unsigned long pfn)
  5957. {
  5958. return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
  5959. pageblock_nr_pages));
  5960. }
  5961. /* [start, end) must belong to a single zone. */
  5962. static int __alloc_contig_migrate_range(struct compact_control *cc,
  5963. unsigned long start, unsigned long end)
  5964. {
  5965. /* This function is based on compact_zone() from compaction.c. */
  5966. unsigned long nr_reclaimed;
  5967. unsigned long pfn = start;
  5968. unsigned int tries = 0;
  5969. int ret = 0;
  5970. migrate_prep();
  5971. while (pfn < end || !list_empty(&cc->migratepages)) {
  5972. if (fatal_signal_pending(current)) {
  5973. ret = -EINTR;
  5974. break;
  5975. }
  5976. if (list_empty(&cc->migratepages)) {
  5977. cc->nr_migratepages = 0;
  5978. pfn = isolate_migratepages_range(cc, pfn, end);
  5979. if (!pfn) {
  5980. ret = -EINTR;
  5981. break;
  5982. }
  5983. tries = 0;
  5984. } else if (++tries == 5) {
  5985. ret = ret < 0 ? ret : -EBUSY;
  5986. break;
  5987. }
  5988. nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
  5989. &cc->migratepages);
  5990. cc->nr_migratepages -= nr_reclaimed;
  5991. ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
  5992. NULL, 0, cc->mode, MR_CMA);
  5993. }
  5994. if (ret < 0) {
  5995. putback_movable_pages(&cc->migratepages);
  5996. return ret;
  5997. }
  5998. return 0;
  5999. }
  6000. /**
  6001. * alloc_contig_range() -- tries to allocate given range of pages
  6002. * @start: start PFN to allocate
  6003. * @end: one-past-the-last PFN to allocate
  6004. * @migratetype: migratetype of the underlaying pageblocks (either
  6005. * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
  6006. * in range must have the same migratetype and it must
  6007. * be either of the two.
  6008. *
  6009. * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
  6010. * aligned, however it's the caller's responsibility to guarantee that
  6011. * we are the only thread that changes migrate type of pageblocks the
  6012. * pages fall in.
  6013. *
  6014. * The PFN range must belong to a single zone.
  6015. *
  6016. * Returns zero on success or negative error code. On success all
  6017. * pages which PFN is in [start, end) are allocated for the caller and
  6018. * need to be freed with free_contig_range().
  6019. */
  6020. int alloc_contig_range(unsigned long start, unsigned long end,
  6021. unsigned migratetype)
  6022. {
  6023. unsigned long outer_start, outer_end;
  6024. unsigned int order;
  6025. int ret = 0;
  6026. struct compact_control cc = {
  6027. .nr_migratepages = 0,
  6028. .order = -1,
  6029. .zone = page_zone(pfn_to_page(start)),
  6030. .mode = MIGRATE_SYNC,
  6031. .ignore_skip_hint = true,
  6032. };
  6033. INIT_LIST_HEAD(&cc.migratepages);
  6034. /*
  6035. * What we do here is we mark all pageblocks in range as
  6036. * MIGRATE_ISOLATE. Because pageblock and max order pages may
  6037. * have different sizes, and due to the way page allocator
  6038. * work, we align the range to biggest of the two pages so
  6039. * that page allocator won't try to merge buddies from
  6040. * different pageblocks and change MIGRATE_ISOLATE to some
  6041. * other migration type.
  6042. *
  6043. * Once the pageblocks are marked as MIGRATE_ISOLATE, we
  6044. * migrate the pages from an unaligned range (ie. pages that
  6045. * we are interested in). This will put all the pages in
  6046. * range back to page allocator as MIGRATE_ISOLATE.
  6047. *
  6048. * When this is done, we take the pages in range from page
  6049. * allocator removing them from the buddy system. This way
  6050. * page allocator will never consider using them.
  6051. *
  6052. * This lets us mark the pageblocks back as
  6053. * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
  6054. * aligned range but not in the unaligned, original range are
  6055. * put back to page allocator so that buddy can use them.
  6056. */
  6057. ret = start_isolate_page_range(pfn_max_align_down(start),
  6058. pfn_max_align_up(end), migratetype,
  6059. false);
  6060. if (ret)
  6061. return ret;
  6062. /*
  6063. * In case of -EBUSY, we'd like to know which page causes problem.
  6064. * So, just fall through. We will check it in test_pages_isolated().
  6065. */
  6066. ret = __alloc_contig_migrate_range(&cc, start, end);
  6067. if (ret && ret != -EBUSY)
  6068. goto done;
  6069. /*
  6070. * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
  6071. * aligned blocks that are marked as MIGRATE_ISOLATE. What's
  6072. * more, all pages in [start, end) are free in page allocator.
  6073. * What we are going to do is to allocate all pages from
  6074. * [start, end) (that is remove them from page allocator).
  6075. *
  6076. * The only problem is that pages at the beginning and at the
  6077. * end of interesting range may be not aligned with pages that
  6078. * page allocator holds, ie. they can be part of higher order
  6079. * pages. Because of this, we reserve the bigger range and
  6080. * once this is done free the pages we are not interested in.
  6081. *
  6082. * We don't have to hold zone->lock here because the pages are
  6083. * isolated thus they won't get removed from buddy.
  6084. */
  6085. lru_add_drain_all();
  6086. drain_all_pages(cc.zone);
  6087. order = 0;
  6088. outer_start = start;
  6089. while (!PageBuddy(pfn_to_page(outer_start))) {
  6090. if (++order >= MAX_ORDER) {
  6091. outer_start = start;
  6092. break;
  6093. }
  6094. outer_start &= ~0UL << order;
  6095. }
  6096. if (outer_start != start) {
  6097. order = page_order(pfn_to_page(outer_start));
  6098. /*
  6099. * outer_start page could be small order buddy page and
  6100. * it doesn't include start page. Adjust outer_start
  6101. * in this case to report failed page properly
  6102. * on tracepoint in test_pages_isolated()
  6103. */
  6104. if (outer_start + (1UL << order) <= start)
  6105. outer_start = start;
  6106. }
  6107. /* Make sure the range is really isolated. */
  6108. if (test_pages_isolated(outer_start, end, false)) {
  6109. pr_info("%s: [%lx, %lx) PFNs busy\n",
  6110. __func__, outer_start, end);
  6111. ret = -EBUSY;
  6112. goto done;
  6113. }
  6114. /* Grab isolated pages from freelists. */
  6115. outer_end = isolate_freepages_range(&cc, outer_start, end);
  6116. if (!outer_end) {
  6117. ret = -EBUSY;
  6118. goto done;
  6119. }
  6120. /* Free head and tail (if any) */
  6121. if (start != outer_start)
  6122. free_contig_range(outer_start, start - outer_start);
  6123. if (end != outer_end)
  6124. free_contig_range(end, outer_end - end);
  6125. done:
  6126. undo_isolate_page_range(pfn_max_align_down(start),
  6127. pfn_max_align_up(end), migratetype);
  6128. return ret;
  6129. }
  6130. void free_contig_range(unsigned long pfn, unsigned nr_pages)
  6131. {
  6132. unsigned int count = 0;
  6133. for (; nr_pages--; pfn++) {
  6134. struct page *page = pfn_to_page(pfn);
  6135. count += page_count(page) != 1;
  6136. __free_page(page);
  6137. }
  6138. WARN(count != 0, "%d pages are still in use!\n", count);
  6139. }
  6140. #endif
  6141. #ifdef CONFIG_MEMORY_HOTPLUG
  6142. /*
  6143. * The zone indicated has a new number of managed_pages; batch sizes and percpu
  6144. * page high values need to be recalulated.
  6145. */
  6146. void __meminit zone_pcp_update(struct zone *zone)
  6147. {
  6148. unsigned cpu;
  6149. mutex_lock(&pcp_batch_high_lock);
  6150. for_each_possible_cpu(cpu)
  6151. pageset_set_high_and_batch(zone,
  6152. per_cpu_ptr(zone->pageset, cpu));
  6153. mutex_unlock(&pcp_batch_high_lock);
  6154. }
  6155. #endif
  6156. void zone_pcp_reset(struct zone *zone)
  6157. {
  6158. unsigned long flags;
  6159. int cpu;
  6160. struct per_cpu_pageset *pset;
  6161. /* avoid races with drain_pages() */
  6162. local_irq_save(flags);
  6163. if (zone->pageset != &boot_pageset) {
  6164. for_each_online_cpu(cpu) {
  6165. pset = per_cpu_ptr(zone->pageset, cpu);
  6166. drain_zonestat(zone, pset);
  6167. }
  6168. free_percpu(zone->pageset);
  6169. zone->pageset = &boot_pageset;
  6170. }
  6171. local_irq_restore(flags);
  6172. }
  6173. #ifdef CONFIG_MEMORY_HOTREMOVE
  6174. /*
  6175. * All pages in the range must be isolated before calling this.
  6176. */
  6177. void
  6178. __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
  6179. {
  6180. struct page *page;
  6181. struct zone *zone;
  6182. unsigned int order, i;
  6183. unsigned long pfn;
  6184. unsigned long flags;
  6185. /* find the first valid pfn */
  6186. for (pfn = start_pfn; pfn < end_pfn; pfn++)
  6187. if (pfn_valid(pfn))
  6188. break;
  6189. if (pfn == end_pfn)
  6190. return;
  6191. zone = page_zone(pfn_to_page(pfn));
  6192. spin_lock_irqsave(&zone->lock, flags);
  6193. pfn = start_pfn;
  6194. while (pfn < end_pfn) {
  6195. if (!pfn_valid(pfn)) {
  6196. pfn++;
  6197. continue;
  6198. }
  6199. page = pfn_to_page(pfn);
  6200. /*
  6201. * The HWPoisoned page may be not in buddy system, and
  6202. * page_count() is not 0.
  6203. */
  6204. if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
  6205. pfn++;
  6206. SetPageReserved(page);
  6207. continue;
  6208. }
  6209. BUG_ON(page_count(page));
  6210. BUG_ON(!PageBuddy(page));
  6211. order = page_order(page);
  6212. #ifdef CONFIG_DEBUG_VM
  6213. pr_info("remove from free list %lx %d %lx\n",
  6214. pfn, 1 << order, end_pfn);
  6215. #endif
  6216. list_del(&page->lru);
  6217. rmv_page_order(page);
  6218. zone->free_area[order].nr_free--;
  6219. for (i = 0; i < (1 << order); i++)
  6220. SetPageReserved((page+i));
  6221. pfn += (1 << order);
  6222. }
  6223. spin_unlock_irqrestore(&zone->lock, flags);
  6224. }
  6225. #endif
  6226. bool is_free_buddy_page(struct page *page)
  6227. {
  6228. struct zone *zone = page_zone(page);
  6229. unsigned long pfn = page_to_pfn(page);
  6230. unsigned long flags;
  6231. unsigned int order;
  6232. spin_lock_irqsave(&zone->lock, flags);
  6233. for (order = 0; order < MAX_ORDER; order++) {
  6234. struct page *page_head = page - (pfn & ((1 << order) - 1));
  6235. if (PageBuddy(page_head) && page_order(page_head) >= order)
  6236. break;
  6237. }
  6238. spin_unlock_irqrestore(&zone->lock, flags);
  6239. return order < MAX_ORDER;
  6240. }