/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/notifier.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/page_ext.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
#include "internal.h"

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION    (8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);        /* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
int _node_numa_mem_[MAX_NUMNODES];
#endif

/* work_structs for global per-cpu drains */
DEFINE_MUTEX(pcpu_drain_mutex);
DEFINE_PER_CPU(struct work_struct, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
        [N_POSSIBLE] = NODE_MASK_ALL,
        [N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
        [N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
        [N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
        [N_MEMORY] = { { [0] = 1UL } },
        [N_CPU] = { { [0] = 1UL } },
#endif  /* NUMA */
};
EXPORT_SYMBOL(node_states);

/* Protect totalram_pages and zone->managed_pages */
static DEFINE_SPINLOCK(managed_page_count_lock);

unsigned long totalram_pages __read_mostly;
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
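
/*
 * Mask of GFP flags the allocator currently honours. It starts as
 * GFP_BOOT_MASK so early boot allocations cannot block on I/O or the
 * filesystem, and is restricted again around suspend; see
 * pm_restrict_gfp_mask() below.
 */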
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
        return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
        page->index = migratetype;
}

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended. To avoid races with the suspend/hibernate code,
 * they should always be called with pm_mutex held (gfp_allowed_mask also should
 * only be modified with pm_mutex held, unless the suspend/hibernate code is
 * guaranteed not to run in parallel with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        if (saved_gfp_mask) {
                gfp_allowed_mask = saved_gfp_mask;
                saved_gfp_mask = 0;
        }
}

void pm_restrict_gfp_mask(void)
{
        WARN_ON(!mutex_is_locked(&pm_mutex));
        WARN_ON(saved_gfp_mask);
        saved_gfp_mask = gfp_allowed_mask;
        gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}
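
/*
 * Report whether storage is unusable for allocation purposes: true once
 * pm_restrict_gfp_mask() has cleared __GFP_IO/__GFP_FS from
 * gfp_allowed_mask, i.e. while devices are suspended.
 */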
bool pm_suspended_storage(void)
{
        if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
                return false;
        return true;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *      1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *      1G machine -> (16M dma, 784M normal, 224M high)
 *      NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *      HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *      HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
        [ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
        [ZONE_DMA32] = 256,
#endif
        [ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
        [ZONE_HIGHMEM] = 0,
#endif
        [ZONE_MOVABLE] = 0,
};

EXPORT_SYMBOL(totalram_pages);

static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
        "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
        "DMA32",
#endif
        "Normal",
#ifdef CONFIG_HIGHMEM
        "HighMem",
#endif
        "Movable",
#ifdef CONFIG_ZONE_DEVICE
        "Device",
#endif
};

char * const migratetype_names[MIGRATE_TYPES] = {
        "Unmovable",
        "Movable",
        "Reclaimable",
        "HighAtomic",
#ifdef CONFIG_CMA
        "CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
        "Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[] = {
        NULL,
        free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
        free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        free_transhuge_page,
#endif
};
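
/*
 * min_free_kbytes is the floor of free memory the allocator tries to keep;
 * it is translated into the per-zone min/low/high watermarks at boot.
 * user_min_free_kbytes remembers a sysctl override (-1 means none), and
 * watermark_scale_factor widens the gap between the watermarks.
 */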
int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
int watermark_scale_factor = 10;

static unsigned long nr_kernel_pages __meminitdata;
static unsigned long nr_all_pages __meminitdata;
static unsigned long dma_reserve __meminitdata;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __meminitdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __meminitdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __meminitdata;
static bool mirrored_kernelcore __meminitdata;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

#if MAX_NUMNODES > 1
int nr_node_ids __read_mostly = MAX_NUMNODES;
int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
        int nid = early_pfn_to_nid(pfn);

        if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
                return true;

        return false;
}

/*
 * Returns false when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static inline bool update_defer_init(pg_data_t *pgdat,
                                unsigned long pfn, unsigned long zone_end,
                                unsigned long *nr_initialised)
{
        /* Always populate low zones for address-constrained allocations */
        if (zone_end < pgdat_end_pfn(pgdat))
                return true;
        (*nr_initialised)++;
        if ((*nr_initialised > pgdat->static_init_pgcnt) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                pgdat->first_deferred_pfn = pfn;
                return false;
        }

        return true;
}
#else
static inline bool early_page_uninitialised(unsigned long pfn)
{
        return false;
}

static inline bool update_defer_init(pg_data_t *pgdat,
                                unsigned long pfn, unsigned long zone_end,
                                unsigned long *nr_initialised)
{
        return true;
}
#endif

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
                                                        unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
        return __pfn_to_section(pfn)->pageblock_flags;
#else
        return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}
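
/*
 * Convert @pfn to the index of the first bit describing its pageblock:
 * each pageblock owns NR_PAGEBLOCK_BITS consecutive bits in the bitmap
 * returned by get_pageblock_bitmap().
 */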
static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
        pfn &= (PAGES_PER_SECTION-1);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
        pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
        return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest to retrieve
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
                                        unsigned long pfn,
                                        unsigned long end_bitidx,
                                        unsigned long mask)
{
        unsigned long *bitmap;
        unsigned long bitidx, word_bitidx;
        unsigned long word;

        bitmap = get_pageblock_bitmap(page, pfn);
        bitidx = pfn_to_bitidx(page, pfn);
        word_bitidx = bitidx / BITS_PER_LONG;
        bitidx &= (BITS_PER_LONG-1);

        word = bitmap[word_bitidx];
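        /*
         * The field of interest ends at bit (bitidx + end_bitidx) within
         * the word; shift it down to bit 0 and apply @mask to extract it.
         */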
        bitidx += end_bitidx;
        return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}

unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
                                        unsigned long end_bitidx,
                                        unsigned long mask)
{
        return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}
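
/* Shorthand: fetch only the migratetype bits of @pfn's pageblock. */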
static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
        return __get_pfnblock_flags_mask(page, pfn, PB_migrate_end, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @end_bitidx: The last bit of interest
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
                                        unsigned long pfn,
                                        unsigned long end_bitidx,
                                        unsigned long mask)
{
        unsigned long *bitmap;
        unsigned long bitidx, word_bitidx;
        unsigned long old_word, word;

        BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);

        bitmap = get_pageblock_bitmap(page, pfn);
        bitidx = pfn_to_bitidx(page, pfn);
        word_bitidx = bitidx / BITS_PER_LONG;
        bitidx &= (BITS_PER_LONG-1);

        VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

        bitidx += end_bitidx;
        mask <<= (BITS_PER_LONG - bitidx - 1);
        flags <<= (BITS_PER_LONG - bitidx - 1);

        word = READ_ONCE(bitmap[word_bitidx]);
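        /*
         * Lock-free read-modify-write: swap in the updated word with
         * cmpxchg(), retrying if a concurrent updater changed it between
         * our read and the exchange.
         */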
        for (;;) {
                old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
                if (word == old_word)
                        break;
                word = old_word;
        }
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
        if (unlikely(page_group_by_mobility_disabled &&
                     migratetype < MIGRATE_PCPTYPES))
                migratetype = MIGRATE_UNMOVABLE;

        set_pageblock_flags_group(page, (unsigned long)migratetype,
                                        PB_migrate, PB_migrate_end);
}

#ifdef CONFIG_DEBUG_VM
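/*
 * Check, under the zone span seqlock, whether @page's pfn lies outside
 * [zone_start_pfn, zone_start_pfn + spanned_pages). The seqlock retry
 * loop guards against the span changing concurrently, e.g. during memory
 * hotplug.
 */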
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
        int ret = 0;
        unsigned seq;
        unsigned long pfn = page_to_pfn(page);
        unsigned long sp, start_pfn;

        do {
                seq = zone_span_seqbegin(zone);
                start_pfn = zone->zone_start_pfn;
                sp = zone->spanned_pages;
                if (!zone_spans_pfn(zone, pfn))
                        ret = 1;
        } while (zone_span_seqretry(zone, seq));

        if (ret)
                pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
                        pfn, zone_to_nid(zone), zone->name,
                        start_pfn, start_pfn + sp);

        return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
        if (!pfn_valid_within(page_to_pfn(page)))
                return 0;
        if (zone != page_zone(page))
                return 0;

        return 1;
}

/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
        if (page_outside_zone_boundaries(zone, page))
                return 1;
        if (!page_is_consistent(zone, page))
                return 1;

        return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
        return 0;
}
#endif

static void bad_page(struct page *page, const char *reason,
                unsigned long bad_flags)
{
        static unsigned long resume;
        static unsigned long nr_shown;
        static unsigned long nr_unshown;

        /*
         * Allow a burst of 60 reports, then keep quiet for that minute;
         * or allow a steady drip of one report per second.
         */
        if (nr_shown == 60) {
                if (time_before(jiffies, resume)) {
                        nr_unshown++;
                        goto out;
                }
                if (nr_unshown) {
                        pr_alert(
                              "BUG: Bad page state: %lu messages suppressed\n",
                                nr_unshown);
                        nr_unshown = 0;
                }
                nr_shown = 0;
        }
        if (nr_shown++ == 0)
                resume = jiffies + 60 * HZ;

        pr_alert("BUG: Bad page state in process %s pfn:%05lx\n",
                current->comm, page_to_pfn(page));
        __dump_page(page, reason);
        bad_flags &= page->flags;
        if (bad_flags)
                pr_alert("bad because of flags: %#lx(%pGp)\n",
                                bad_flags, &bad_flags);
        dump_page_owner(page);

        print_modules();
        dump_stack();
out:
        /* Leave bad fields for debug, except PageBuddy could make trouble */
        page_mapcount_reset(page); /* remove PageBuddy */
        add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages". They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits form a pointer to the
 * head page.
 *
 * The first tail page's ->compound_dtor holds the offset into the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void free_compound_page(struct page *page)
{
        __free_pages_ok(page, compound_order(page));
}

void prep_compound_page(struct page *page, unsigned int order)
{
        int i;
        int nr_pages = 1 << order;

        set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
        set_compound_order(page, order);
        __SetPageHead(page);
        for (i = 1; i < nr_pages; i++) {
                struct page *p = page + i;
                set_page_count(p, 0);
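                /*
                 * Poison ->mapping with TAIL_MAPPING so that code which
                 * wrongly treats a tail page as a normal page is caught:
                 * the free path checks for this value, and dereferencing
                 * the poison pointer faults.
                 */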
                p->mapping = TAIL_MAPPING;
                set_compound_head(p, page);
        }
        atomic_set(compound_mapcount_ptr(page), -1);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
bool _debug_pagealloc_enabled __read_mostly
                        = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled);
bool _debug_guardpage_enabled __read_mostly;

static int __init early_debug_pagealloc(char *buf)
{
        if (!buf)
                return -EINVAL;
        return kstrtobool(buf, &_debug_pagealloc_enabled);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static bool need_debug_guardpage(void)
{
        /* If we don't use debug_pagealloc, we don't need guard page */
        if (!debug_pagealloc_enabled())
                return false;

        if (!debug_guardpage_minorder())
                return false;

        return true;
}

static void init_debug_guardpage(void)
{
        if (!debug_pagealloc_enabled())
                return;

        if (!debug_guardpage_minorder())
                return;

        _debug_guardpage_enabled = true;
}

struct page_ext_operations debug_guardpage_ops = {
        .need = need_debug_guardpage,
        .init = init_debug_guardpage,
};

static int __init debug_guardpage_minorder_setup(char *buf)
{
        unsigned long res;

        if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) {
                pr_err("Bad debug_guardpage_minorder value\n");
                return 0;
        }
        _debug_guardpage_minorder = res;
        pr_info("Setting debug_guardpage_minorder to %lu\n", res);
        return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype)
{
        struct page_ext *page_ext;

        if (!debug_guardpage_enabled())
                return false;

        if (order >= debug_guardpage_minorder())
                return false;

        page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
                return false;

        __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

        INIT_LIST_HEAD(&page->lru);
        set_page_private(page, order);
        /* Guard pages are not available for any usage */
        __mod_zone_freepage_state(zone, -(1 << order), migratetype);

        return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype)
{
        struct page_ext *page_ext;

        if (!debug_guardpage_enabled())
                return;

        page_ext = lookup_page_ext(page);
        if (unlikely(!page_ext))
                return;

        __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);

        set_page_private(page, 0);
        if (!is_migrate_isolate(migratetype))
                __mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
struct page_ext_operations debug_guardpage_ops;
static inline bool set_page_guard(struct zone *zone, struct page *page,
                        unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
                                unsigned int order, int migratetype) {}
#endif
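
/*
 * Mark a page as the head of a free buddy block of the given order:
 * PageBuddy is set and the order is stashed in page_private(), which is
 * exactly what page_is_buddy() looks for when merging.
 */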
static inline void set_page_order(struct page *page, unsigned int order)
{
        set_page_private(page, order);
        __SetPageBuddy(page);
}

static inline void rmv_page_order(struct page *page)
{
        __ClearPageBuddy(page);
        set_page_private(page, 0);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set ->_mapcount
 * to PAGE_BUDDY_MAPCOUNT_VALUE.
 * Setting, clearing, and testing _mapcount PAGE_BUDDY_MAPCOUNT_VALUE is
 * serialized by zone->lock.
 *
 * For recording a page's order, we use page_private(page).
 */
static inline int page_is_buddy(struct page *page, struct page *buddy,
                                                        unsigned int order)
{
        if (page_is_guard(buddy) && page_order(buddy) == order) {
                if (page_zone_id(page) != page_zone_id(buddy))
                        return 0;

                VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

                return 1;
        }

        if (PageBuddy(buddy) && page_order(buddy) == order) {
                /*
                 * zone check is done late to avoid uselessly
                 * calculating zone/node ids for pages that could
                 * never merge.
                 */
                if (page_zone_id(page) != page_zone_id(buddy))
                        return 0;

                VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

                return 1;
        }
        return 0;
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain a direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages which are heads of contiguous
 * runs of free pages of length (1 << order), marked with _mapcount
 * PAGE_BUDDY_MAPCOUNT_VALUE. A page's order is recorded in the
 * page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other. That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */
static inline void __free_one_page(struct page *page,
                unsigned long pfn,
                struct zone *zone, unsigned int order,
                int migratetype)
{
        unsigned long combined_pfn;
        unsigned long uninitialized_var(buddy_pfn);
        struct page *buddy;
        unsigned int max_order;

        max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1);

        VM_BUG_ON(!zone_is_initialized(zone));
        VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

        VM_BUG_ON(migratetype == -1);
        if (likely(!is_migrate_isolate(migratetype)))
                __mod_zone_freepage_state(zone, 1 << order, migratetype);

        VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
        VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
        while (order < max_order - 1) {
                buddy_pfn = __find_buddy_pfn(pfn, order);
                buddy = page + (buddy_pfn - pfn);

                if (!pfn_valid_within(buddy_pfn))
                        goto done_merging;
                if (!page_is_buddy(page, buddy, order))
                        goto done_merging;
                /*
                 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
                 * merge with it and move up one order.
                 */
                if (page_is_guard(buddy)) {
                        clear_page_guard(zone, buddy, order, migratetype);
                } else {
                        list_del(&buddy->lru);
                        zone->free_area[order].nr_free--;
                        rmv_page_order(buddy);
                }
  709. combined_pfn = buddy_pfn & pfn;
  710. page = page + (combined_pfn - pfn);
  711. pfn = combined_pfn;
  712. order++;
  713. }
  714. if (max_order < MAX_ORDER) {
  715. /* If we are here, it means order is >= pageblock_order.
  716. * We want to prevent merge between freepages on isolate
  717. * pageblock and normal pageblock. Without this, pageblock
  718. * isolation could cause incorrect freepage or CMA accounting.
  719. *
  720. * We don't want to hit this code for the more frequent
  721. * low-order merging.
  722. */
  723. if (unlikely(has_isolate_pageblock(zone))) {
  724. int buddy_mt;
  725. buddy_pfn = __find_buddy_pfn(pfn, order);
  726. buddy = page + (buddy_pfn - pfn);
  727. buddy_mt = get_pageblock_migratetype(buddy);
  728. if (migratetype != buddy_mt
  729. && (is_migrate_isolate(migratetype) ||
  730. is_migrate_isolate(buddy_mt)))
  731. goto done_merging;
  732. }
  733. max_order++;
  734. goto continue_merging;
  735. }
  736. done_merging:
  737. set_page_order(page, order);
  738. /*
  739. * If this is not the largest possible page, check if the buddy
  740. * of the next-highest order is free. If it is, it's possible
  741. * that pages are being freed that will coalesce soon. In case,
  742. * that is happening, add the free page to the tail of the list
  743. * so it's less likely to be used soon and more likely to be merged
  744. * as a higher order page
  745. */
  746. if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
  747. struct page *higher_page, *higher_buddy;
  748. combined_pfn = buddy_pfn & pfn;
  749. higher_page = page + (combined_pfn - pfn);
  750. buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
  751. higher_buddy = higher_page + (buddy_pfn - combined_pfn);
  752. if (pfn_valid_within(buddy_pfn) &&
  753. page_is_buddy(higher_page, higher_buddy, order + 1)) {
  754. list_add_tail(&page->lru,
  755. &zone->free_area[order].free_list[migratetype]);
  756. goto out;
  757. }
  758. }
  759. list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
  760. out:
  761. zone->free_area[order].nr_free++;
  762. }
/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
                                        unsigned long check_flags)
{
        if (unlikely(atomic_read(&page->_mapcount) != -1))
                return false;

        if (unlikely((unsigned long)page->mapping |
                        page_ref_count(page) |
#ifdef CONFIG_MEMCG
                        (unsigned long)page->mem_cgroup |
#endif
                        (page->flags & check_flags)))
                return false;

        return true;
}

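/*
 * Called only after page_expected_state() has failed: re-derive which field
 * is bad so the report printed by bad_page() names a specific reason.
 */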
static void free_pages_check_bad(struct page *page)
{
        const char *bad_reason;
        unsigned long bad_flags;

        bad_reason = NULL;
        bad_flags = 0;

        if (unlikely(atomic_read(&page->_mapcount) != -1))
                bad_reason = "nonzero mapcount";
        if (unlikely(page->mapping != NULL))
                bad_reason = "non-NULL mapping";
        if (unlikely(page_ref_count(page) != 0))
                bad_reason = "nonzero _refcount";
        if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE)) {
                bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
                bad_flags = PAGE_FLAGS_CHECK_AT_FREE;
        }
#ifdef CONFIG_MEMCG
        if (unlikely(page->mem_cgroup))
                bad_reason = "page still charged to cgroup";
#endif
        bad_page(page, bad_reason, bad_flags);
}

static inline int free_pages_check(struct page *page)
{
        if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
                return 0;

        /* Something has gone sideways, find it */
        free_pages_check_bad(page);
        return 1;
}

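/*
 * Validate a tail page of a compound page as it is freed. Returns 0 on
 * success; as a side effect it always clears ->mapping and the compound
 * head marker so the tail page is safe to free.
 */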
static int free_tail_pages_check(struct page *head_page, struct page *page)
{
        int ret = 1;

        /*
         * We rely on page->lru.next never having bit 0 set, unless the page
         * is PageTail(). Let's make sure that's true even for poisoned ->lru.
         */
        BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

        if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
                ret = 0;
                goto out;
        }
        switch (page - head_page) {
        case 1:
                /* the first tail page: ->mapping is compound_mapcount() */
                if (unlikely(compound_mapcount(page))) {
                        bad_page(page, "nonzero compound_mapcount", 0);
                        goto out;
                }
                break;
        case 2:
                /*
                 * the second tail page: ->mapping is
                 * page_deferred_list().next -- ignore value.
                 */
                break;
        default:
                if (page->mapping != TAIL_MAPPING) {
                        bad_page(page, "corrupted mapping in tail page", 0);
                        goto out;
                }
                break;
        }
        if (unlikely(!PageTail(page))) {
                bad_page(page, "PageTail not set", 0);
                goto out;
        }
        if (unlikely(compound_head(page) != head_page)) {
                bad_page(page, "compound_head not consistent", 0);
                goto out;
        }
        ret = 0;
out:
        page->mapping = NULL;
        clear_compound_head(page);
        return ret;
}

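/*
 * Common preparation for freeing pages of any order: sanity-check the head
 * and tail pages, strip state that must not survive the free, and hand the
 * range to the arch/poisoning/kasan hooks. Returns false if the page is in
 * a bad state and must not be released to the buddy allocator.
 */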
static __always_inline bool free_pages_prepare(struct page *page,
                                        unsigned int order, bool check_free)
{
        int bad = 0;

        VM_BUG_ON_PAGE(PageTail(page), page);

        trace_mm_page_free(page, order);

        /*
         * Check tail pages before head page information is cleared to
         * avoid checking PageCompound for order-0 pages.
         */
        if (unlikely(order)) {
                bool compound = PageCompound(page);
                int i;

                VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

                if (compound)
                        ClearPageDoubleMap(page);
                for (i = 1; i < (1 << order); i++) {
                        if (compound)
                                bad += free_tail_pages_check(page, page + i);
                        if (unlikely(free_pages_check(page + i))) {
                                bad++;
                                continue;
                        }
                        (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
                }
        }
        if (PageMappingFlags(page))
                page->mapping = NULL;
        if (memcg_kmem_enabled() && PageKmemcg(page))
                memcg_kmem_uncharge(page, order);
        if (check_free)
                bad += free_pages_check(page);
        if (bad)
                return false;

        page_cpupid_reset_last(page);
        page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
        reset_page_owner(page, order);

        if (!PageHighMem(page)) {
                debug_check_no_locks_freed(page_address(page),
                                           PAGE_SIZE << order);
                debug_check_no_obj_freed(page_address(page),
                                           PAGE_SIZE << order);
        }
        arch_free_page(page, order);
        kernel_poison_pages(page, 1 << order, 0);
        kernel_map_pages(page, 1 << order, 0);
        kasan_free_pages(page, order);

        return true;
}

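/*
 * With DEBUG_VM, order-0 pages are fully checked when they are freed into a
 * pcp list, so the later bulk free can skip rechecking them; without it, the
 * expensive check is deferred to bulkfree_pcp_prepare() instead.
 */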
#ifdef CONFIG_DEBUG_VM
static inline bool free_pcp_prepare(struct page *page)
{
        return free_pages_prepare(page, 0, true);
}

static inline bool bulkfree_pcp_prepare(struct page *page)
{
        return false;
}
#else
static bool free_pcp_prepare(struct page *page)
{
        return free_pages_prepare(page, 0, false);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
        return free_pages_check(page);
}
#endif /* CONFIG_DEBUG_VM */

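/*
 * Bring the order-0 buddy's struct page into the cache ahead of the merge
 * performed later under zone->lock in free_pcppages_bulk().
 */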
static inline void prefetch_buddy(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);
        unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
        struct page *buddy = page + (buddy_pfn - pfn);

        prefetch(buddy);
}

/*
 * Frees a number of pages from the PCP lists.
 * Assumes all pages on the list are in the same zone, and of the same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
                                        struct per_cpu_pages *pcp)
{
        int migratetype = 0;
        int batch_free = 0;
        int prefetch_nr = 0;
        bool isolated_pageblocks;
        struct page *page, *tmp;
        LIST_HEAD(head);

        while (count) {
                struct list_head *list;

                /*
                 * Remove pages from lists in a round-robin fashion. A
                 * batch_free count is maintained that is incremented when an
                 * empty list is encountered. This is so more pages are freed
                 * off fuller lists instead of spinning excessively around
                 * empty lists.
                 */
                do {
                        batch_free++;
                        if (++migratetype == MIGRATE_PCPTYPES)
                                migratetype = 0;
                        list = &pcp->lists[migratetype];
                } while (list_empty(list));

                /* This is the only non-empty list. Free them all. */
                if (batch_free == MIGRATE_PCPTYPES)
                        batch_free = count;

                do {
                        page = list_last_entry(list, struct page, lru);
                        /* must delete to avoid corrupting pcp list */
                        list_del(&page->lru);
                        pcp->count--;

                        if (bulkfree_pcp_prepare(page))
                                continue;

                        list_add_tail(&page->lru, &head);

                        /*
                         * We are going to put the page back to the global
                         * pool, prefetch its buddy to speed up later access
                         * under zone->lock. It is believed the overhead of
                         * an additional test and calculating buddy_pfn here
                         * can be offset by reduced memory latency later. To
                         * avoid excessive prefetching due to large count, only
                         * prefetch buddy for the first pcp->batch nr of pages.
                         */
                        if (prefetch_nr++ < pcp->batch)
                                prefetch_buddy(page);
                } while (--count && --batch_free && !list_empty(list));
        }

        spin_lock(&zone->lock);
        isolated_pageblocks = has_isolate_pageblock(zone);

        /*
         * Use safe version since after __free_one_page(),
         * page->lru.next will not point to original list.
         */
        list_for_each_entry_safe(page, tmp, &head, lru) {
                int mt = get_pcppage_migratetype(page);
                /* MIGRATE_ISOLATE page should not go to pcplists */
                VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
                /* Pageblock could have been isolated meanwhile */
                if (unlikely(isolated_pageblocks))
                        mt = get_pageblock_migratetype(page);

                __free_one_page(page, page_to_pfn(page), zone, 0, mt);
                trace_mm_page_pcpu_drain(page, 0, mt);
        }
        spin_unlock(&zone->lock);
}

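/*
 * Free a single page directly to the buddy allocator, taking zone->lock.
 * The pageblock's migratetype is re-read under the lock when isolation may
 * have changed it since the caller looked it up.
 */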
static void free_one_page(struct zone *zone,
                                struct page *page, unsigned long pfn,
                                unsigned int order,
                                int migratetype)
{
        spin_lock(&zone->lock);
        if (unlikely(has_isolate_pageblock(zone) ||
                is_migrate_isolate(migratetype))) {
                migratetype = get_pfnblock_migratetype(page, pfn);
        }
        __free_one_page(page, pfn, zone, order, migratetype);
        spin_unlock(&zone->lock);
}

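/*
 * Set up one struct page at boot or hot-add time: zero it, link it to its
 * zone and node, and give it an initial reference and mapcount state.
 */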
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
                                unsigned long zone, int nid)
{
        mm_zero_struct_page(page);
        set_page_links(page, zone, nid, pfn);
        init_page_count(page);
        page_mapcount_reset(page);
        page_cpupid_reset_last(page);

        INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
        /* The shift won't overflow because ZONE_NORMAL is below 4G. */
        if (!is_highmem_idx(zone))
                set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __meminit init_reserved_page(unsigned long pfn)
{
        pg_data_t *pgdat;
        int nid, zid;

        if (!early_page_uninitialised(pfn))
                return;

        nid = early_pfn_to_nid(pfn);
        pgdat = NODE_DATA(nid);

        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                struct zone *zone = &pgdat->node_zones[zid];

                if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
                        break;
        }
        __init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long end_pfn = PFN_UP(end);

        for (; start_pfn < end_pfn; start_pfn++) {
                if (pfn_valid(start_pfn)) {
                        struct page *page = pfn_to_page(start_pfn);

                        init_reserved_page(start_pfn);

                        /* Avoid false-positive PageTail() */
                        INIT_LIST_HEAD(&page->lru);

                        SetPageReserved(page);
                }
        }
}

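/*
 * Slow path for freeing pages that bypass the per-cpu lists: prepare the
 * pages, then release them to the buddy allocator with IRQs disabled.
 */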
static void __free_pages_ok(struct page *page, unsigned int order)
{
        unsigned long flags;
        int migratetype;
        unsigned long pfn = page_to_pfn(page);

        if (!free_pages_prepare(page, order, true))
                return;

        migratetype = get_pfnblock_migratetype(page, pfn);
        local_irq_save(flags);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, pfn, order, migratetype);
        local_irq_restore(flags);
}

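/*
 * Release a naturally-aligned block of boot memory to the buddy allocator:
 * clear PageReserved and the refcount on every page, then free the whole
 * block through __free_pages().
 */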
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
{
        unsigned int nr_pages = 1 << order;
        struct page *p = page;
        unsigned int loop;

        prefetchw(p);
        for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
                prefetchw(p + 1);
                __ClearPageReserved(p);
                set_page_count(p, 0);
        }
        __ClearPageReserved(p);
        set_page_count(p, 0);

        page_zone(page)->managed_pages += nr_pages;
        set_page_refcounted(page);
        __free_pages(page, order);
}

#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
        defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

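/*
 * Resolve a pfn to its node id during early boot. Lookups go through a
 * small cache serialized by early_pfn_lock; pfns that cannot be resolved
 * fall back to the first online node.
 */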
int __meminit early_pfn_to_nid(unsigned long pfn)
{
        static DEFINE_SPINLOCK(early_pfn_lock);
        int nid;

        spin_lock(&early_pfn_lock);
        nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
        if (nid < 0)
                nid = first_online_node;
        spin_unlock(&early_pfn_lock);

        return nid;
}
#endif

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
static inline bool __meminit __maybe_unused
meminit_pfn_in_nid(unsigned long pfn, int node,
                   struct mminit_pfnnid_cache *state)
{
        int nid;

        nid = __early_pfn_to_nid(pfn, state);
        if (nid >= 0 && nid != node)
                return false;
        return true;
}

/* Only safe to use early in boot when initialisation is single-threaded */
static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
        return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
}

#else
static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
{
        return true;
}
static inline bool __meminit __maybe_unused
meminit_pfn_in_nid(unsigned long pfn, int node,
                   struct mminit_pfnnid_cache *state)
{
        return true;
}
#endif

void __init __free_pages_bootmem(struct page *page, unsigned long pfn,
                                                        unsigned int order)
{
        if (early_page_uninitialised(pfn))
                return;
        return __free_pages_boot_core(page, order);
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner. The scanners then need to
 * use only pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
                                     unsigned long end_pfn, struct zone *zone)
{
        struct page *start_page;
        struct page *end_page;

        /* end_pfn is one past the range we are checking */
        end_pfn--;

        if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
                return NULL;

        start_page = pfn_to_online_page(start_pfn);
        if (!start_page)
                return NULL;

        if (page_zone(start_page) != zone)
                return NULL;

        end_page = pfn_to_page(end_pfn);

        /* This gives shorter code than deriving page_zone(end_page) */
        if (page_zone_id(start_page) != page_zone_id(end_page))
                return NULL;

        return start_page;
}

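/*
 * Walk the zone pageblock by pageblock and record whether it is free of
 * holes; zone->contiguous lets later pageblock lookups skip the per-block
 * validation done by __pageblock_pfn_to_page().
 */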
void set_zone_contiguous(struct zone *zone)
{
        unsigned long block_start_pfn = zone->zone_start_pfn;
        unsigned long block_end_pfn;

        block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
        for (; block_start_pfn < zone_end_pfn(zone);
                        block_start_pfn = block_end_pfn,
                         block_end_pfn += pageblock_nr_pages) {

                block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

                if (!__pageblock_pfn_to_page(block_start_pfn,
                                             block_end_pfn, zone))
                        return;
        }

        /* We confirm that there is no hole */
        zone->contiguous = true;
}

void clear_zone_contiguous(struct zone *zone)
{
        zone->contiguous = false;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
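/*
 * Free a run of pages during deferred init: hand back a whole pageblock in
 * one call when the run is pageblock-sized and aligned, otherwise free the
 * pages one at a time.
 */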
static void __init deferred_free_range(unsigned long pfn,
                                       unsigned long nr_pages)
{
        struct page *page;
        unsigned long i;

        if (!nr_pages)
                return;

        page = pfn_to_page(pfn);

        /* Free a large naturally-aligned chunk if possible */
        if (nr_pages == pageblock_nr_pages &&
            (pfn & (pageblock_nr_pages - 1)) == 0) {
                set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                __free_pages_boot_core(page, pageblock_order);
                return;
        }

        for (i = 0; i < nr_pages; i++, page++, pfn++) {
                if ((pfn & (pageblock_nr_pages - 1)) == 0)
                        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
                __free_pages_boot_core(page, 0);
        }
}

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
        if (atomic_dec_and_test(&pgdat_init_n_undone))
                complete(&pgdat_init_all_done_comp);
}

/*
 * Returns true if the page needs to be initialized or freed to the buddy
 * allocator.
 *
 * First we check if the pfn is valid on architectures where it is possible
 * to have holes within pageblock_nr_pages. On systems where it is not
 * possible, this function is optimized out.
 *
 * Then, we check if the current large page is valid by only checking the
 * validity of the head pfn.
 *
 * Finally, meminit_pfn_in_nid is checked on systems where pfns can
 * interleave within a node: a pfn is between the start and end of a node,
 * but does not belong to this memory node.
 */
static inline bool __init
deferred_pfn_valid(int nid, unsigned long pfn,
                   struct mminit_pfnnid_cache *nid_init_state)
{
        if (!pfn_valid_within(pfn))
                return false;
        if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
                return false;
        if (!meminit_pfn_in_nid(pfn, nid, nid_init_state))
                return false;
        return true;
}

/*
 * Free pages to the buddy allocator. Try to free aligned chunks of
 * pageblock_nr_pages pages.
 */
static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
                                       unsigned long end_pfn)
{
        struct mminit_pfnnid_cache nid_init_state = { };
        unsigned long nr_pgmask = pageblock_nr_pages - 1;
        unsigned long nr_free = 0;

        for (; pfn < end_pfn; pfn++) {
                if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
                        deferred_free_range(pfn - nr_free, nr_free);
                        nr_free = 0;
                } else if (!(pfn & nr_pgmask)) {
                        deferred_free_range(pfn - nr_free, nr_free);
                        nr_free = 1;
                        touch_nmi_watchdog();
                } else {
                        nr_free++;
                }
        }
        /* Free the last block of pages to allocator */
        deferred_free_range(pfn - nr_free, nr_free);
}

/*
 * Initialize struct pages. We minimize pfn page lookups and scheduler checks
 * by performing them only once per pageblock_nr_pages.
 * Return number of pages initialized.
 */
static unsigned long __init deferred_init_pages(int nid, int zid,
                                                unsigned long pfn,
                                                unsigned long end_pfn)
{
        struct mminit_pfnnid_cache nid_init_state = { };
        unsigned long nr_pgmask = pageblock_nr_pages - 1;
        unsigned long nr_pages = 0;
        struct page *page = NULL;

        for (; pfn < end_pfn; pfn++) {
                if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
                        page = NULL;
                        continue;
                } else if (!page || !(pfn & nr_pgmask)) {
                        page = pfn_to_page(pfn);
                        touch_nmi_watchdog();
                } else {
                        page++;
                }
                __init_single_page(page, pfn, zid, nid);
                nr_pages++;
        }
        return nr_pages;
}

/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
        pg_data_t *pgdat = data;
        int nid = pgdat->node_id;
        unsigned long start = jiffies;
        unsigned long nr_pages = 0;
        unsigned long spfn, epfn, first_init_pfn, flags;
        phys_addr_t spa, epa;
        int zid;
        struct zone *zone;
        const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
        u64 i;

        /* Bind memory initialisation thread to a local node if possible */
        if (!cpumask_empty(cpumask))
                set_cpus_allowed_ptr(current, cpumask);

        pgdat_resize_lock(pgdat, &flags);
        first_init_pfn = pgdat->first_deferred_pfn;
        if (first_init_pfn == ULONG_MAX) {
                pgdat_resize_unlock(pgdat, &flags);
                pgdat_init_report_one_done();
                return 0;
        }

        /* Sanity check boundaries */
        BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
        BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
        pgdat->first_deferred_pfn = ULONG_MAX;

        /* Only the highest zone is deferred so find it */
        for (zid = 0; zid < MAX_NR_ZONES; zid++) {
                zone = pgdat->node_zones + zid;
                if (first_init_pfn < zone_end_pfn(zone))
                        break;
        }
        first_init_pfn = max(zone->zone_start_pfn, first_init_pfn);

        /*
         * Initialize and free pages. We do it in two loops: first we
         * initialize the struct pages, then we free them to the buddy
         * allocator, because while we are freeing pages we can access
         * pages that are ahead (computing the buddy page in
         * __free_one_page()).
         */
        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
                spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
                epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
                nr_pages += deferred_init_pages(nid, zid, spfn, epfn);
        }
        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
                spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
                epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
                deferred_free_pages(nid, zid, spfn, epfn);
        }
        pgdat_resize_unlock(pgdat, &flags);

        /* Sanity check that the next zone really is unpopulated */
        WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));

        pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
                                        jiffies_to_msecs(jiffies - start));

        pgdat_init_report_one_done();
        return 0;
}

/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

/*
 * If this zone has deferred pages, try to grow it by initializing enough
 * deferred pages to satisfy the allocation specified by order, rounded up to
 * the nearest PAGES_PER_SECTION boundary. So we're adding memory in increments
 * of SECTION_SIZE bytes by initializing struct pages in increments of
 * PAGES_PER_SECTION * sizeof(struct page) bytes.
 *
 * Return true when zone was grown, otherwise return false. We return true even
 * when we grow less than requested, to let the caller decide if there are
 * enough pages to satisfy the allocation.
 *
 * Note: We use noinline because this function is needed only during boot, and
 * it is called from a __ref function _deferred_grow_zone. This way we are
 * making sure that it is not inlined into the permanent text section.
 */
static noinline bool __init
deferred_grow_zone(struct zone *zone, unsigned int order)
{
        int zid = zone_idx(zone);
        int nid = zone_to_nid(zone);
        pg_data_t *pgdat = NODE_DATA(nid);
        unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
        unsigned long nr_pages = 0;
        unsigned long first_init_pfn, spfn, epfn, t, flags;
        unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
        phys_addr_t spa, epa;
        u64 i;

        /* Only the last zone may have deferred pages */
        if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
                return false;

        pgdat_resize_lock(pgdat, &flags);

        /*
         * If deferred pages have been initialized while we were waiting for
         * the lock, return true, as the zone was grown. The caller will retry
         * this zone. We won't return to this function since the caller also
         * has this static branch.
         */
        if (!static_branch_unlikely(&deferred_pages)) {
                pgdat_resize_unlock(pgdat, &flags);
                return true;
        }

        /*
         * If someone grew this zone while we were waiting for the spinlock,
         * return true, as there might be enough pages already.
         */
        if (first_deferred_pfn != pgdat->first_deferred_pfn) {
                pgdat_resize_unlock(pgdat, &flags);
                return true;
        }

        first_init_pfn = max(zone->zone_start_pfn, first_deferred_pfn);

        if (first_init_pfn >= pgdat_end_pfn(pgdat)) {
                pgdat_resize_unlock(pgdat, &flags);
                return false;
        }

        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
                spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
                epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));

                while (spfn < epfn && nr_pages < nr_pages_needed) {
                        t = ALIGN(spfn + PAGES_PER_SECTION, PAGES_PER_SECTION);
                        first_deferred_pfn = min(t, epfn);
                        nr_pages += deferred_init_pages(nid, zid, spfn,
                                                        first_deferred_pfn);
                        spfn = first_deferred_pfn;
                }

                if (nr_pages >= nr_pages_needed)
                        break;
        }

        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
                spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
                epfn = min_t(unsigned long, first_deferred_pfn, PFN_DOWN(epa));
                deferred_free_pages(nid, zid, spfn, epfn);

                if (first_deferred_pfn == epfn)
                        break;
        }
        pgdat->first_deferred_pfn = first_deferred_pfn;
        pgdat_resize_unlock(pgdat, &flags);

        return nr_pages > 0;
}

/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have the __ref wrapper: it avoids the
 * section-mismatch warning while still letting the __init function body be
 * discarded after boot.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
        return deferred_grow_zone(zone, order);
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

void __init page_alloc_init_late(void)
{
        struct zone *zone;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
        int nid;

        /* There will be num_node_state(N_MEMORY) threads */
        atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
        for_each_node_state(nid, N_MEMORY) {
                kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
        }

        /* Block until all are initialised */
        wait_for_completion(&pgdat_init_all_done_comp);

        /*
         * We initialized the rest of the deferred pages. Permanently disable
         * on-demand struct page initialization.
         */
        static_branch_disable(&deferred_pages);

        /* Reinit limits that are based on free pages after the kernel is up */
        files_maxfiles_init();
#endif
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
        /* Discard memblock private memory */
        memblock_discard();
#endif

        for_each_populated_zone(zone)
                set_zone_contiguous(zone);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
        unsigned i = pageblock_nr_pages;
        struct page *p = page;

        do {
                __ClearPageReserved(p);
                set_page_count(p, 0);
        } while (++p, --i);

        set_pageblock_migratetype(page, MIGRATE_CMA);

        if (pageblock_order >= MAX_ORDER) {
                i = pageblock_nr_pages;
                p = page;
                do {
                        set_page_refcounted(p);
                        __free_pages(p, MAX_ORDER - 1);
                        p += MAX_ORDER_NR_PAGES;
                } while (i -= MAX_ORDER_NR_PAGES);
        } else {
                set_page_refcounted(page);
                __free_pages(page, pageblock_order);
        }

        adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
        int low, int high, struct free_area *area,
        int migratetype)
{
        unsigned long size = 1 << high;

        while (high > low) {
                area--;
                high--;
                size >>= 1;
                VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

                /*
                 * Mark as guard pages (or page), which will allow merging
                 * back to the allocator when the buddy is freed. The
                 * corresponding page table entries will not be touched;
                 * the pages will stay not-present in the virtual address
                 * space.
                 */
                if (set_page_guard(zone, &page[size], high, migratetype))
                        continue;

                list_add(&page[size].lru, &area->free_list[migratetype]);
                area->nr_free++;
                set_page_order(&page[size], high);
        }
}

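/*
 * Slow path of check_new_page(): name the first bad field found so that
 * bad_page() can report it. Hwpoisoned pages are skipped without a report.
 */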
static void check_new_page_bad(struct page *page)
{
        const char *bad_reason = NULL;
        unsigned long bad_flags = 0;

        if (unlikely(atomic_read(&page->_mapcount) != -1))
                bad_reason = "nonzero mapcount";
        if (unlikely(page->mapping != NULL))
                bad_reason = "non-NULL mapping";
        if (unlikely(page_ref_count(page) != 0))
                bad_reason = "nonzero _count";
        if (unlikely(page->flags & __PG_HWPOISON)) {
                bad_reason = "HWPoisoned (hardware-corrupted)";
                bad_flags = __PG_HWPOISON;
                /* Don't complain about hwpoisoned pages */
                page_mapcount_reset(page); /* remove PageBuddy */
                return;
        }
        if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
                bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
                bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
        }
#ifdef CONFIG_MEMCG
        if (unlikely(page->mem_cgroup))
                bad_reason = "page still charged to cgroup";
#endif
        bad_page(page, bad_reason, bad_flags);
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
        if (likely(page_expected_state(page,
                                PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
                return 0;

        check_new_page_bad(page);
        return 1;
}

static inline bool free_pages_prezeroed(void)
{
        return IS_ENABLED(CONFIG_PAGE_POISONING_ZERO) &&
                page_poisoning_enabled();
}

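/*
 * Mirror image of the free-side checks above: with DEBUG_VM, order-0 pages
 * are checked as they are allocated from a pcp list; without it, the check
 * is done once when the pcp list is refilled from the free lists.
 */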
#ifdef CONFIG_DEBUG_VM
static bool check_pcp_refill(struct page *page)
{
        return false;
}

static bool check_new_pcp(struct page *page)
{
        return check_new_page(page);
}
#else
static bool check_pcp_refill(struct page *page)
{
        return check_new_page(page);
}
static bool check_new_pcp(struct page *page)
{
        return false;
}
#endif /* CONFIG_DEBUG_VM */

static bool check_new_pages(struct page *page, unsigned int order)
{
        int i;
        for (i = 0; i < (1 << order); i++) {
                struct page *p = page + i;

                if (unlikely(check_new_page(p)))
                        return true;
        }

        return false;
}

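/*
 * Common post-allocation hooks applied to every page leaving the allocator:
 * reset private state, take the initial reference, and run the arch,
 * poisoning, kasan and page_owner hooks.
 */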
inline void post_alloc_hook(struct page *page, unsigned int order,
                                gfp_t gfp_flags)
{
        set_page_private(page, 0);
        set_page_refcounted(page);

        arch_alloc_page(page, order);
        kernel_map_pages(page, 1 << order, 1);
        kernel_poison_pages(page, 1 << order, 1);
        kasan_alloc_pages(page, order);
        set_page_owner(page, order, gfp_flags);
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
                                                        unsigned int alloc_flags)
{
        int i;

        post_alloc_hook(page, order, gfp_flags);

        if (!free_pages_prezeroed() && (gfp_flags & __GFP_ZERO))
                for (i = 0; i < (1 << order); i++)
                        clear_highpage(page + i);

        if (order && (gfp_flags & __GFP_COMP))
                prep_compound_page(page, order);

        /*
         * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
         * allocate the page. The expectation is that the caller is taking
         * steps that will free more memory. The caller should avoid the page
         * being used for !PFMEMALLOC purposes.
         */
        if (alloc_flags & ALLOC_NO_WATERMARKS)
                set_page_pfmemalloc(page);
        else
                clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                                                int migratetype)
{
        unsigned int current_order;
        struct free_area *area;
        struct page *page;

        /* Find a page of the appropriate size in the preferred list */
        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
                area = &(zone->free_area[current_order]);
                page = list_first_entry_or_null(&area->free_list[migratetype],
                                                        struct page, lru);
                if (!page)
                        continue;
                list_del(&page->lru);
                rmv_page_order(page);
                area->nr_free--;
                expand(zone, page, order, current_order, area, migratetype);
                set_pcppage_migratetype(page, migratetype);
                return page;
        }

        return NULL;
}

/*
 * This array describes the order in which the free lists are fallen back to
 * when the free lists for the desired migratetype are depleted.
 */
static int fallbacks[MIGRATE_TYPES][4] = {
        [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
        [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
        [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
#ifdef CONFIG_CMA
        [MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
#endif
#ifdef CONFIG_MEMORY_ISOLATION
        [MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
#endif
};

#ifdef CONFIG_CMA
static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
                                        unsigned int order)
{
        return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
                                        unsigned int order) { return NULL; }
#endif

/*
 * Move the free pages in a range to the free lists of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
                          struct page *start_page, struct page *end_page,
                          int migratetype, int *num_movable)
{
        struct page *page;
        unsigned int order;
        int pages_moved = 0;

#ifndef CONFIG_HOLES_IN_ZONE
        /*
         * page_zone is not safe to call in this context when
         * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
         * anyway as we check zone boundaries in move_freepages_block().
         * Remove at a later date when no bug reports exist related to
         * grouping pages by mobility
         */
        VM_BUG_ON(pfn_valid(page_to_pfn(start_page)) &&
                  pfn_valid(page_to_pfn(end_page)) &&
                  page_zone(start_page) != page_zone(end_page));
#endif

        if (num_movable)
                *num_movable = 0;

        for (page = start_page; page <= end_page;) {
                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }

                /* Make sure we are not inadvertently changing nodes */
                VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);

                if (!PageBuddy(page)) {
                        /*
                         * We assume that pages that could be isolated for
                         * migration are movable. But we don't actually try
                         * isolating, as that would be expensive.
                         */
                        if (num_movable &&
                                        (PageLRU(page) || __PageMovable(page)))
                                (*num_movable)++;

                        page++;
                        continue;
                }

                order = page_order(page);
                list_move(&page->lru,
                          &zone->free_area[order].free_list[migratetype]);
                page += 1 << order;
                pages_moved += 1 << order;
        }

        return pages_moved;
}

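/*
 * Round the given page down to the start of its pageblock and move the
 * whole block, clamping the range so we never cross a zone boundary.
 */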
int move_freepages_block(struct zone *zone, struct page *page,
                                int migratetype, int *num_movable)
{
        unsigned long start_pfn, end_pfn;
        struct page *start_page, *end_page;

        start_pfn = page_to_pfn(page);
        start_pfn = start_pfn & ~(pageblock_nr_pages-1);
        start_page = pfn_to_page(start_pfn);
        end_page = start_page + pageblock_nr_pages - 1;
        end_pfn = start_pfn + pageblock_nr_pages - 1;

        /* Do not cross zone boundaries */
        if (!zone_spans_pfn(zone, start_pfn))
                start_page = page;
        if (!zone_spans_pfn(zone, end_pfn))
                return 0;

        return move_freepages(zone, start_page, end_page, migratetype,
                              num_movable);
}

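/*
 * Re-tag every pageblock covered by a free page of start_order (which may
 * span several pageblocks when start_order > pageblock_order).
 */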
static void change_pageblock_range(struct page *pageblock_page,
                                   int start_order, int migratetype)
{
        int nr_pageblocks = 1 << (start_order - pageblock_order);

        while (nr_pageblocks--) {
                set_pageblock_migratetype(pageblock_page, migratetype);
                pageblock_page += pageblock_nr_pages;
        }
}

/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
        /*
         * This order check is kept intentionally, even though the next
         * check uses a relaxed order test. The reason is that we can steal
         * the whole pageblock whenever this condition is met, while the
         * check below is merely a heuristic that doesn't guarantee it and
         * could be changed at any time.
         */
        if (order >= pageblock_order)
                return true;

        if (order >= pageblock_order / 2 ||
                start_mt == MIGRATE_RECLAIMABLE ||
                start_mt == MIGRATE_UNMOVABLE ||
                page_group_by_mobility_disabled)
                return true;

        return false;
}

/*
 * This function implements actual steal behaviour. If order is large enough,
 * we can steal the whole pageblock. If not, we first move the freepages in
 * this pageblock to our migratetype and determine how many already-allocated
 * pages in the pageblock have a compatible migratetype. If at least half of
 * the pages are free or compatible, we can change the migratetype of the
 * pageblock itself, so pages freed in the future will be put on the correct
 * free list.
 */
static void steal_suitable_fallback(struct zone *zone, struct page *page,
                                        int start_type, bool whole_block)
{
        unsigned int current_order = page_order(page);
        struct free_area *area;
        int free_pages, movable_pages, alike_pages;
        int old_block_type;

        old_block_type = get_pageblock_migratetype(page);

        /*
         * This can happen due to races and we want to prevent broken
         * highatomic accounting.
         */
        if (is_migrate_highatomic(old_block_type))
                goto single_page;

        /* Take ownership for orders >= pageblock_order */
        if (current_order >= pageblock_order) {
                change_pageblock_range(page, current_order, start_type);
                goto single_page;
        }

        /* We are not allowed to try stealing from the whole block */
        if (!whole_block)
                goto single_page;

        free_pages = move_freepages_block(zone, page, start_type,
                                          &movable_pages);
        /*
         * Determine how many pages are compatible with our allocation.
         * For movable allocation, it's the number of movable pages which
         * we just obtained. For other types it's a bit more tricky.
         */
        if (start_type == MIGRATE_MOVABLE) {
                alike_pages = movable_pages;
        } else {
                /*
                 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
                 * to MOVABLE pageblock, consider all non-movable pages as
                 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
                 * vice versa, be conservative since we can't distinguish the
                 * exact migratetype of non-movable pages.
                 */
                if (old_block_type == MIGRATE_MOVABLE)
                        alike_pages = pageblock_nr_pages
                                                - (free_pages + movable_pages);
                else
                        alike_pages = 0;
        }

        /* moving whole block can fail due to zone boundary conditions */
        if (!free_pages)
                goto single_page;

        /*
         * If a sufficient number of pages in the block are either free or of
         * comparable migratability as our allocation, claim the whole block.
         */
        if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
                        page_group_by_mobility_disabled)
                set_pageblock_migratetype(page, start_type);

        return;

single_page:
        area = &zone->free_area[current_order];
        list_move(&page->lru, &area->free_list[start_type]);
}

/*
 * Check whether there is a suitable fallback freepage with the requested
 * order. If only_stealable is true, this function returns fallback_mt only
 * if we can steal the other freepages all together. This would help to
 * reduce fragmentation due to mixed migratetype pages in one pageblock.
 */
int find_suitable_fallback(struct free_area *area, unsigned int order,
                        int migratetype, bool only_stealable, bool *can_steal)
{
        int i;
        int fallback_mt;

        if (area->nr_free == 0)
                return -1;

        *can_steal = false;
        for (i = 0;; i++) {
                fallback_mt = fallbacks[migratetype][i];
                if (fallback_mt == MIGRATE_TYPES)
                        break;

                if (list_empty(&area->free_list[fallback_mt]))
                        continue;

                if (can_steal_fallback(order, migratetype))
                        *can_steal = true;

                if (!only_stealable)
                        return fallback_mt;

                if (*can_steal)
                        return fallback_mt;
        }

        return -1;
}

/*
 * Reserve a pageblock for exclusive use of high-order atomic allocations if
 * there are no empty page blocks that contain a page with a suitable order
 */
static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
                                unsigned int alloc_order)
{
        int mt;
        unsigned long max_managed, flags;

        /*
         * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
         * Check is race-prone but harmless.
         */
        max_managed = (zone->managed_pages / 100) + pageblock_nr_pages;
        if (zone->nr_reserved_highatomic >= max_managed)
                return;

        spin_lock_irqsave(&zone->lock, flags);

        /* Recheck the nr_reserved_highatomic limit under the lock */
        if (zone->nr_reserved_highatomic >= max_managed)
                goto out_unlock;

        /* Yoink! */
        mt = get_pageblock_migratetype(page);
        if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
            && !is_migrate_cma(mt)) {
                zone->nr_reserved_highatomic += pageblock_nr_pages;
                set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
                move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
        }

out_unlock:
        spin_unlock_irqrestore(&zone->lock, flags);
}

/*
 * Used when an allocation is about to fail under memory pressure. This
 * potentially hurts the reliability of high-order allocations when under
 * intense memory pressure but failed atomic allocations should be easier
 * to recover from than an OOM.
 *
 * If @force is true, try to unreserve a pageblock even though the
 * highatomic pageblock is exhausted.
 */
static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
                                                bool force)
{
        struct zonelist *zonelist = ac->zonelist;
        unsigned long flags;
        struct zoneref *z;
        struct zone *zone;
        struct page *page;
        int order;
        bool ret;

        for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
                                                                ac->nodemask) {
                /*
                 * Preserve at least one pageblock unless memory pressure
                 * is really high.
                 */
                if (!force && zone->nr_reserved_highatomic <=
                                        pageblock_nr_pages)
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                for (order = 0; order < MAX_ORDER; order++) {
                        struct free_area *area = &(zone->free_area[order]);

                        page = list_first_entry_or_null(
                                        &area->free_list[MIGRATE_HIGHATOMIC],
                                        struct page, lru);
                        if (!page)
                                continue;

                        /*
                         * In the page freeing path, migratetype changes are
                         * racy, so we can encounter several free pages in a
                         * pageblock in this loop although we changed the
                         * pageblock type from highatomic to ac->migratetype.
                         * So we should adjust the count only once.
                         */
                        if (is_migrate_highatomic_page(page)) {
                                /*
                                 * It should never happen but changes to
                                 * locking could inadvertently allow a per-cpu
                                 * drain to add pages to MIGRATE_HIGHATOMIC
                                 * while unreserving so be safe and watch for
                                 * underflows.
                                 */
                                zone->nr_reserved_highatomic -= min(
                                                pageblock_nr_pages,
                                                zone->nr_reserved_highatomic);
                        }

                        /*
                         * Convert to ac->migratetype and avoid the normal
                         * pageblock stealing heuristics. Minimally, the caller
                         * is doing the work and needs the pages. More
                         * importantly, if the block was always converted to
                         * MIGRATE_UNMOVABLE or another type then the number
                         * of pageblocks that cannot be completely freed
                         * may increase.
                         */
                        set_pageblock_migratetype(page, ac->migratetype);
                        ret = move_freepages_block(zone, page, ac->migratetype,
                                                                        NULL);
                        if (ret) {
                                spin_unlock_irqrestore(&zone->lock, flags);
                                return ret;
                        }
                }
                spin_unlock_irqrestore(&zone->lock, flags);
        }

        return false;
}

/*
 * Try finding a free buddy page on the fallback list and put it on the free
 * list of requested migratetype, possibly along with other pages from the same
 * block, depending on fragmentation avoidance heuristics. Returns true if
 * fallback was found so that __rmqueue_smallest() can grab it.
 *
 * The use of signed ints for order and current_order is a deliberate
 * deviation from the rest of this file, to make the for loop
 * condition simpler.
 */
static __always_inline bool
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
        struct free_area *area;
        int current_order;
        struct page *page;
        int fallback_mt;
        bool can_steal;

        /*
         * Find the largest available free page in the other list. This roughly
         * approximates finding the pageblock with the most free pages, which
         * would be too costly to do exactly.
         */
        for (current_order = MAX_ORDER - 1; current_order >= order;
                                --current_order) {
                area = &(zone->free_area[current_order]);
                fallback_mt = find_suitable_fallback(area, current_order,
                                start_migratetype, false, &can_steal);
                if (fallback_mt == -1)
                        continue;

                /*
                 * When we cannot steal all the free pages from the pageblock
                 * and the requested migratetype is movable, it's better to
                 * steal and split the smallest available page instead of the
                 * largest available page, because even if the next movable
                 * allocation falls back into a different pageblock than this
                 * one, it won't cause permanent fragmentation.
                 */
                if (!can_steal && start_migratetype == MIGRATE_MOVABLE
                                        && current_order > order)
                        goto find_smallest;

                goto do_steal;
        }

        return false;

find_smallest:
        for (current_order = order; current_order < MAX_ORDER;
                                                        current_order++) {
                area = &(zone->free_area[current_order]);
                fallback_mt = find_suitable_fallback(area, current_order,
                                start_migratetype, false, &can_steal);
                if (fallback_mt != -1)
                        break;
        }

        /*
         * This should not happen - we already found a suitable fallback
         * when looking for the largest page.
         */
        VM_BUG_ON(current_order == MAX_ORDER);

do_steal:
        page = list_first_entry(&area->free_list[fallback_mt],
                                                        struct page, lru);

        steal_suitable_fallback(zone, page, start_migratetype, can_steal);

        trace_mm_page_alloc_extfrag(page, order, current_order,
                start_migratetype, fallback_mt);

        return true;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static __always_inline struct page *
__rmqueue(struct zone *zone, unsigned int order, int migratetype)
{
        struct page *page;

retry:
        page = __rmqueue_smallest(zone, order, migratetype);
        if (unlikely(!page)) {
                if (migratetype == MIGRATE_MOVABLE)
                        page = __rmqueue_cma_fallback(zone, order);

                if (!page && __rmqueue_fallback(zone, order, migratetype))
                        goto retry;
        }

        trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
}

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency. Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
                        unsigned long count, struct list_head *list,
                        int migratetype)
{
        int i, alloced = 0;

        spin_lock(&zone->lock);
        for (i = 0; i < count; ++i) {
                struct page *page = __rmqueue(zone, order, migratetype);
                if (unlikely(page == NULL))
                        break;

                if (unlikely(check_pcp_refill(page)))
                        continue;

                /*
                 * Split buddy pages returned by expand() are received here in
                 * physical page order. The page is added to the tail of the
                 * caller's list. From the caller's perspective, the linked
                 * list is ordered by page number under some conditions. This
                 * is useful for IO devices that can forward from the head of
                 * the list, thus also in physical page order, and that can
                 * merge IO requests if the physical pages are ordered
                 * properly.
                 */
                list_add_tail(&page->lru, list);
                alloced++;
                if (is_migrate_cma(get_pcppage_migratetype(page)))
                        __mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
                                              -(1 << order));
        }

        /*
         * i pages were removed from the buddy list even if some leak due
         * to check_pcp_refill failing so adjust NR_FREE_PAGES based
         * on i. Do not confuse with 'alloced' which is the number of
         * pages added to the pcp list.
         */
        __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
        spin_unlock(&zone->lock);
        return alloced;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain, batch;

	local_irq_save(flags);
	batch = READ_ONCE(pcp->batch);
	to_drain = min(pcp->count, batch);
	if (to_drain > 0)
		free_pcppages_bulk(zone, to_drain, pcp);
	local_irq_restore(flags);
}
#endif
/*
 * Drain pcplists of the indicated processor and zone.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
	unsigned long flags;
	struct per_cpu_pageset *pset;
	struct per_cpu_pages *pcp;

	local_irq_save(flags);
	pset = per_cpu_ptr(zone->pageset, cpu);

	pcp = &pset->pcp;
	if (pcp->count)
		free_pcppages_bulk(zone, pcp->count, pcp);
	local_irq_restore(flags);
}

/*
 * Drain pcplists of all zones on the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		drain_pages_zone(cpu, zone);
	}
}
/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 *
 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
 * the single zone's pages.
 */
void drain_local_pages(struct zone *zone)
{
	int cpu = smp_processor_id();

	if (zone)
		drain_pages_zone(cpu, zone);
	else
		drain_pages(cpu);
}

static void drain_local_pages_wq(struct work_struct *work)
{
	/*
	 * drain_all_pages doesn't use proper cpu hotplug protection so
	 * we can race with cpu offline when the WQ can move this from
	 * a cpu-pinned worker to an unbound one. Operating on a different
	 * cpu is all right, but we also have to make sure we do not migrate
	 * to yet another cpu in the middle of the drain.
	 */
	preempt_disable();
	drain_local_pages(NULL);
	preempt_enable();
}
/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 *
 * When zone parameter is non-NULL, spill just the single zone's pages.
 *
 * Note that this can be extremely slow as the draining happens in a workqueue.
 */
void drain_all_pages(struct zone *zone)
{
	int cpu;

	/*
	 * Allocate in the BSS so we won't require allocation in
	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
	 */
	static cpumask_t cpus_with_pcps;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON_ONCE(!mm_percpu_wq))
		return;

	/*
	 * Do not drain if one is already in progress unless it's specific to
	 * a zone. Such callers are primarily CMA and memory hotplug and need
	 * the drain to be complete when the call returns.
	 */
	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
		if (!zone)
			return;
		mutex_lock(&pcpu_drain_mutex);
	}

	/*
	 * We don't care about racing with CPU hotplug event
	 * as offline notification will cause the notified
	 * cpu to drain that CPU pcps and on_each_cpu_mask
	 * disables preemption as part of its processing
	 */
	for_each_online_cpu(cpu) {
		struct per_cpu_pageset *pcp;
		struct zone *z;
		bool has_pcps = false;

		if (zone) {
			pcp = per_cpu_ptr(zone->pageset, cpu);
			if (pcp->pcp.count)
				has_pcps = true;
		} else {
			for_each_populated_zone(z) {
				pcp = per_cpu_ptr(z->pageset, cpu);
				if (pcp->pcp.count) {
					has_pcps = true;
					break;
				}
			}
		}

		if (has_pcps)
			cpumask_set_cpu(cpu, &cpus_with_pcps);
		else
			cpumask_clear_cpu(cpu, &cpus_with_pcps);
	}

	for_each_cpu(cpu, &cpus_with_pcps) {
		struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
		INIT_WORK(work, drain_local_pages_wq);
		queue_work_on(cpu, mm_percpu_wq, work);
	}
	for_each_cpu(cpu, &cpus_with_pcps)
		flush_work(per_cpu_ptr(&pcpu_drain, cpu));

	mutex_unlock(&pcpu_drain_mutex);
}
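/*
 * Illustrative sketch (not part of this file): a typical zone-specific drain
 * as done by callers such as CMA or memory offlining. The surrounding
 * function is hypothetical; only drain_all_pages() and its NULL/zone
 * contract come from the code above. Guarded with #if 0 so it serves as
 * documentation only.
 */
#if 0
static void example_flush_zone_pcps(struct zone *zone)
{
	/* Flush one zone's pcplists; the drain is complete on return. */
	drain_all_pages(zone);

	/* Passing NULL instead drains every populated zone on all CPUs. */
	drain_all_pages(NULL);
}
#endif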
#ifdef CONFIG_HIBERNATION

/*
 * Touch the watchdog for every WD_PAGE_COUNT pages.
 */
#define WD_PAGE_COUNT	(128*1024)

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
	unsigned long flags;
	unsigned int order, t;
	struct page *page;

	if (zone_is_empty(zone))
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone_end_pfn(zone);
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);

			if (!--page_count) {
				touch_nmi_watchdog();
				page_count = WD_PAGE_COUNT;
			}

			if (page_zone(page) != zone)
				continue;

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each_entry(page,
				&zone->free_area[order].free_list[t], lru) {
			unsigned long i;

			pfn = page_to_pfn(page);
			for (i = 0; i < (1UL << order); i++) {
				if (!--page_count) {
					touch_nmi_watchdog();
					page_count = WD_PAGE_COUNT;
				}
				swsusp_set_page_free(pfn_to_page(pfn + i));
			}
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */
static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
{
	int migratetype;

	if (!free_pcp_prepare(page))
		return false;

	migratetype = get_pfnblock_migratetype(page, pfn);
	set_pcppage_migratetype(page, migratetype);
	return true;
}

static void free_unref_page_commit(struct page *page, unsigned long pfn)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	int migratetype;

	migratetype = get_pcppage_migratetype(page);
	__count_vm_event(PGFREE);

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat HIGHATOMIC as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(is_migrate_isolate(migratetype))) {
			free_one_page(zone, page, pfn, 0, migratetype);
			return;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= pcp->high) {
		unsigned long batch = READ_ONCE(pcp->batch);
		free_pcppages_bulk(zone, batch, pcp);
	}
}
/*
 * Free a 0-order page
 */
void free_unref_page(struct page *page)
{
	unsigned long flags;
	unsigned long pfn = page_to_pfn(page);

	if (!free_unref_page_prepare(page, pfn))
		return;

	local_irq_save(flags);
	free_unref_page_commit(page, pfn);
	local_irq_restore(flags);
}

/*
 * Free a list of 0-order pages
 */
void free_unref_page_list(struct list_head *list)
{
	struct page *page, *next;
	unsigned long flags, pfn;
	int batch_count = 0;

	/* Prepare pages for freeing */
	list_for_each_entry_safe(page, next, list, lru) {
		pfn = page_to_pfn(page);
		if (!free_unref_page_prepare(page, pfn))
			list_del(&page->lru);
		set_page_private(page, pfn);
	}

	local_irq_save(flags);
	list_for_each_entry_safe(page, next, list, lru) {
		unsigned long pfn = page_private(page);

		set_page_private(page, 0);
		trace_mm_page_free_batched(page);
		free_unref_page_commit(page, pfn);

		/*
		 * Guard against excessive IRQ disabled times when we get
		 * a large list of pages to free.
		 */
		if (++batch_count == SWAP_CLUSTER_MAX) {
			local_irq_restore(flags);
			batch_count = 0;
			local_irq_save(flags);
		}
	}
	local_irq_restore(flags);
}
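/*
 * Illustrative sketch (not from this file): how an in-kernel caller such as
 * release_pages() might batch order-0 frees through free_unref_page_list().
 * The function and the pages_to_free name are hypothetical; guarded with
 * #if 0 as documentation only.
 */
#if 0
static void example_batch_free(struct page **pages, int nr)
{
	LIST_HEAD(pages_to_free);
	int i;

	/* Chain the pages through page->lru, then free them in one batch. */
	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &pages_to_free);

	free_unref_page_list(&pages_to_free);
}
#endif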
/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	VM_BUG_ON_PAGE(!page_count(page), page);

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
	split_page_owner(page, order);
}
EXPORT_SYMBOL_GPL(split_page);
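/*
 * Illustrative sketch (not from this file): splitting a non-compound
 * higher-order allocation so that the sub-pages can be freed one by one,
 * as the comment above describes. The function is hypothetical; guarded
 * with #if 0 as documentation only.
 */
#if 0
static int example_split_and_free(void)
{
	unsigned int order = 2;
	/* No __GFP_COMP, so this is a non-compound order-2 page. */
	struct page *page = alloc_pages(GFP_KERNEL, order);
	int i;

	if (!page)
		return -ENOMEM;

	/* Give each of the 1 << order sub-pages its own reference. */
	split_page(page, order);

	/* Each sub-page is now an independent order-0 page. */
	for (i = 0; i < (1 << order); i++)
		__free_page(page + i);
	return 0;
}
#endif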
int __isolate_free_page(struct page *page, unsigned int order)
{
	unsigned long watermark;
	struct zone *zone;
	int mt;

	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	mt = get_pageblock_migratetype(page);

	if (!is_migrate_isolate(mt)) {
		/*
		 * Obey watermarks as if the page was being allocated. We can
		 * emulate a high-order watermark check with a raised order-0
		 * watermark, because we already know our high-order page
		 * exists.
		 */
		watermark = min_wmark_pages(zone) + (1UL << order);
		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
			return 0;

		__mod_zone_freepage_state(zone, -(1UL << order), mt);
	}

	/* Remove page from free list */
	list_del(&page->lru);
	zone->free_area[order].nr_free--;
	rmv_page_order(page);

	/*
	 * Set the pageblock if the isolated page is at least half of a
	 * pageblock
	 */
	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages) {
			int mt = get_pageblock_migratetype(page);
			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
			    && !is_migrate_highatomic(mt))
				set_pageblock_migratetype(page,
							  MIGRATE_MOVABLE);
		}
	}

	return 1UL << order;
}
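/*
 * Worked example for the watermark emulation above (the numbers are
 * hypothetical): with min_wmark_pages(zone) == 1024 and order == 4, the
 * check becomes zone_watermark_ok(zone, 0, 1024 + 16, 0, ALLOC_CMA), i.e.
 * the zone must still clear its order-0 min watermark after the 2^4 = 16
 * pages of the isolated buddy are taken out.
 */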
/*
 * Update NUMA hit/miss statistics
 *
 * Must be called with interrupts disabled.
 */
static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
#ifdef CONFIG_NUMA
	enum numa_stat_item local_stat = NUMA_LOCAL;

	/* skip numa counters update if numa stats is disabled */
	if (!static_branch_likely(&vm_numa_stat_key))
		return;

	if (z->node != numa_node_id())
		local_stat = NUMA_OTHER;

	if (z->node == preferred_zone->node)
		__inc_numa_state(z, NUMA_HIT);
	else {
		__inc_numa_state(z, NUMA_MISS);
		__inc_numa_state(preferred_zone, NUMA_FOREIGN);
	}
	__inc_numa_state(z, local_stat);
#endif
}
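/*
 * Worked example (hypothetical nodes): a task running on node 0 prefers
 * node 0, but the page is finally taken from node 1. Then z->node (1)
 * differs from preferred_zone->node (0), so node 1 gets NUMA_MISS and
 * node 0 gets NUMA_FOREIGN; and because z->node also differs from
 * numa_node_id(), the local_stat accounted on node 1 is NUMA_OTHER
 * rather than NUMA_LOCAL.
 */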
/* Remove page from the per-cpu list, caller must protect the list */
static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
			struct per_cpu_pages *pcp,
			struct list_head *list)
{
	struct page *page;

	do {
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					pcp->batch, list,
					migratetype);
			if (unlikely(list_empty(list)))
				return NULL;
		}

		page = list_first_entry(list, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} while (check_new_pcp(page));

	return page;
}

/* Lock and remove page from the per-cpu list */
static struct page *rmqueue_pcplist(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
			gfp_t gfp_flags, int migratetype)
{
	struct per_cpu_pages *pcp;
	struct list_head *list;
	struct page *page;
	unsigned long flags;

	local_irq_save(flags);
	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	list = &pcp->lists[migratetype];
	page = __rmqueue_pcplist(zone, migratetype, pcp, list);
	if (page) {
		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
		zone_statistics(preferred_zone, zone);
	}
	local_irq_restore(flags);
	return page;
}
/*
 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
 */
static inline
struct page *rmqueue(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
			gfp_t gfp_flags, unsigned int alloc_flags,
			int migratetype)
{
	unsigned long flags;
	struct page *page;

	if (likely(order == 0)) {
		page = rmqueue_pcplist(preferred_zone, zone, order,
				gfp_flags, migratetype);
		goto out;
	}

	/*
	 * We most definitely don't want callers attempting to
	 * allocate greater than order-1 page units with __GFP_NOFAIL.
	 */
	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
	spin_lock_irqsave(&zone->lock, flags);

	do {
		page = NULL;
		if (alloc_flags & ALLOC_HARDER) {
			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
			if (page)
				trace_mm_page_alloc_zone_locked(page, order, migratetype);
		}
		if (!page)
			page = __rmqueue(zone, order, migratetype);
	} while (page && check_new_pages(page, order));
	spin_unlock(&zone->lock);
	if (!page)
		goto failed;
	__mod_zone_freepage_state(zone, -(1 << order),
				  get_pcppage_migratetype(page));

	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
	zone_statistics(preferred_zone, zone);
	local_irq_restore(flags);

out:
	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
	return page;

failed:
	local_irq_restore(flags);
	return NULL;
}
#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct {
	struct fault_attr attr;

	bool ignore_gfp_highmem;
	bool ignore_gfp_reclaim;
	u32 min_order;
} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_reclaim = true,
	.ignore_gfp_highmem = true,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);
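/*
 * Usage sketch (assumption: the generic fault-injection boot format
 * "<interval>,<probability>,<space>,<times>" parsed by setup_fault_attr(),
 * as described in Documentation/fault-injection/). For example, booting
 * with
 *
 *	fail_page_alloc=1,10,0,-1
 *
 * would check every eligible allocation, fail roughly 10% of them, and
 * place no limit on the total number of injected failures.
 */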
static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return false;
	if (gfp_mask & __GFP_NOFAIL)
		return false;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return false;
	if (fail_page_alloc.ignore_gfp_reclaim &&
			(gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
					&fail_page_alloc.attr);
	if (IS_ERR(dir))
		return PTR_ERR(dir);

	if (!debugfs_create_bool("ignore-gfp-wait", mode, dir,
				&fail_page_alloc.ignore_gfp_reclaim))
		goto fail;
	if (!debugfs_create_bool("ignore-gfp-highmem", mode, dir,
				&fail_page_alloc.ignore_gfp_highmem))
		goto fail;
	if (!debugfs_create_u32("min-order", mode, dir,
				&fail_page_alloc.min_order))
		goto fail;

	return 0;
fail:
	debugfs_remove_recursive(dir);

	return -ENOMEM;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return false;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */
/*
 * Return true if free base pages are above 'mark'. For high-order checks it
 * will return true if the order-0 watermark is reached and there is at least
 * one free page of a suitable size. Checking now avoids taking the zone lock
 * to check in the allocation paths if no pages are free.
 */
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages)
{
	long min = mark;
	int o;
	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));

	/* free_pages may go negative - that's OK */
	free_pages -= (1 << order) - 1;

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;

	/*
	 * If the caller does not have rights to ALLOC_HARDER then subtract
	 * the high-atomic reserves. This will over-estimate the size of the
	 * atomic reserve but it avoids a search.
	 */
	if (likely(!alloc_harder)) {
		free_pages -= z->nr_reserved_highatomic;
	} else {
		/*
		 * OOM victims can try even harder than normal ALLOC_HARDER
		 * users on the grounds that it's definitely going to be in
		 * the exit path shortly and free memory. Any allocation it
		 * makes during the free path will be small and short-lived.
		 */
		if (alloc_flags & ALLOC_OOM)
			min -= min / 2;
		else
			min -= min / 4;
	}

#ifdef CONFIG_CMA
	/* If allocation can't use CMA areas don't use free CMA pages */
	if (!(alloc_flags & ALLOC_CMA))
		free_pages -= zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

	/*
	 * Check watermarks for an order-0 allocation request. If these
	 * are not met, then a high-order request also cannot go ahead
	 * even if a suitable page happened to be free.
	 */
	if (free_pages <= min + z->lowmem_reserve[classzone_idx])
		return false;

	/* If this is an order-0 request then the watermark is fine */
	if (!order)
		return true;

	/* For a high-order request, check at least one suitable page is free */
	for (o = order; o < MAX_ORDER; o++) {
		struct free_area *area = &z->free_area[o];
		int mt;

		if (!area->nr_free)
			continue;

		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
			if (!list_empty(&area->free_list[mt]))
				return true;
		}

#ifdef CONFIG_CMA
		if ((alloc_flags & ALLOC_CMA) &&
		    !list_empty(&area->free_list[MIGRATE_CMA])) {
			return true;
		}
#endif
		if (alloc_harder &&
			!list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
			return true;
	}
	return false;
}
bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
		      int classzone_idx, unsigned int alloc_flags)
{
	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
					zone_page_state(z, NR_FREE_PAGES));
}
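/*
 * Illustrative sketch (not from this file): the typical caller pattern, as
 * used for instance by compaction, checking whether an order-3 request
 * could proceed against the zone's min watermark. The function and the
 * classzone_idx of 0 are hypothetical simplifications; guarded with #if 0
 * as documentation only.
 */
#if 0
static bool example_can_allocate_order3(struct zone *zone)
{
	return zone_watermark_ok(zone, 3, min_wmark_pages(zone), 0, 0);
}
#endif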
static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx, unsigned int alloc_flags)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);
	long cma_pages = 0;

#ifdef CONFIG_CMA
	/* If allocation can't use CMA areas don't use free CMA pages */
	if (!(alloc_flags & ALLOC_CMA))
		cma_pages = zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

	/*
	 * Fast check for order-0 only. If this fails then the reserves
	 * need to be calculated. There is a corner case where the check
	 * passes but only the high-order atomic reserves are free. If
	 * the caller is !atomic then it'll uselessly search the free
	 * list. That corner case is then slower but it is harmless.
	 */
	if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx])
		return true;

	return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags,
					free_pages);
}

bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
			unsigned long mark, int classzone_idx)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, classzone_idx, 0,
								free_pages);
}
#ifdef CONFIG_NUMA
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
				RECLAIM_DISTANCE;
}
#else	/* CONFIG_NUMA */
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return true;
}
#endif	/* CONFIG_NUMA */
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
						const struct alloc_context *ac)
{
	struct zoneref *z = ac->preferred_zoneref;
	struct zone *zone;
	struct pglist_data *last_pgdat_dirty_limit = NULL;

	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
	 */
	for_next_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		struct page *page;
		unsigned long mark;

		if (cpusets_enabled() &&
			(alloc_flags & ALLOC_CPUSET) &&
			!__cpuset_zone_allowed(zone, gfp_mask))
				continue;
		/*
		 * When allocating a page cache page for writing, we
		 * want to get it from a node that is within its dirty
		 * limit, such that no single node holds more than its
		 * proportional share of globally allowed dirty pages.
		 * The dirty limits take into account the node's
		 * lowmem reserves and high watermark so that kswapd
		 * should be able to balance it without having to
		 * write pages from its LRU list.
		 *
		 * XXX: For now, allow allocations to potentially
		 * exceed the per-node dirty limit in the slowpath
		 * (spread_dirty_pages unset) before going into reclaim,
		 * which is important when on a NUMA setup the allowed
		 * nodes are together not big enough to reach the
		 * global limit. The proper fix for these situations
		 * will require awareness of nodes in the
		 * dirty-throttling and the flusher threads.
		 */
		if (ac->spread_dirty_pages) {
			if (last_pgdat_dirty_limit == zone->zone_pgdat)
				continue;

			if (!node_dirty_ok(zone->zone_pgdat)) {
				last_pgdat_dirty_limit = zone->zone_pgdat;
				continue;
			}
		}

		mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
		if (!zone_watermark_fast(zone, order, mark,
				       ac_classzone_idx(ac), alloc_flags)) {
			int ret;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
			/*
			 * Watermark failed for this zone, but see if we can
			 * grow this zone if it contains deferred pages.
			 */
			if (static_branch_unlikely(&deferred_pages)) {
				if (_deferred_grow_zone(zone, order))
					goto try_this_zone;
			}
#endif
			/* Checked here to keep the fast path fast */
			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
			if (alloc_flags & ALLOC_NO_WATERMARKS)
				goto try_this_zone;

			if (node_reclaim_mode == 0 ||
			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
				continue;

			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
			switch (ret) {
			case NODE_RECLAIM_NOSCAN:
				/* did not scan */
				continue;
			case NODE_RECLAIM_FULL:
				/* scanned but unreclaimable */
				continue;
			default:
				/* did we reclaim enough */
				if (zone_watermark_ok(zone, order, mark,
						ac_classzone_idx(ac), alloc_flags))
					goto try_this_zone;

				continue;
			}
		}

try_this_zone:
		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
				gfp_mask, alloc_flags, ac->migratetype);
		if (page) {
			prep_new_page(page, order, gfp_mask, alloc_flags);

			/*
			 * If this is a high-order atomic allocation then check
			 * if the pageblock should be reserved for the future
			 */
			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
				reserve_highatomic_pageblock(page, zone, order);

			return page;
		} else {
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
			/* Try again if zone has deferred pages */
			if (static_branch_unlikely(&deferred_pages)) {
				if (_deferred_grow_zone(zone, order))
					goto try_this_zone;
			}
#endif
		}
	}

	return NULL;
}
/*
 * Large machines with many possible nodes should not always dump per-node
 * meminfo in irq context.
 */
static inline bool should_suppress_show_mem(void)
{
	bool ret = false;

#if NODES_SHIFT > 8
	ret = in_interrupt();
#endif
	return ret;
}

static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
{
	unsigned int filter = SHOW_MEM_FILTER_NODES;
	static DEFINE_RATELIMIT_STATE(show_mem_rs, HZ, 1);

	if (should_suppress_show_mem() || !__ratelimit(&show_mem_rs))
		return;

	/*
	 * This documents exceptions given to allocations in certain
	 * contexts that are allowed to allocate outside current's set
	 * of allowed nodes.
	 */
	if (!(gfp_mask & __GFP_NOMEMALLOC))
		if (tsk_is_oom_victim(current) ||
		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
			filter &= ~SHOW_MEM_FILTER_NODES;
	if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
		filter &= ~SHOW_MEM_FILTER_NODES;

	show_mem(filter, nodemask);
}

void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	static DEFINE_RATELIMIT_STATE(nopage_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl\n",
			current->comm, &vaf, gfp_mask, &gfp_mask,
			nodemask_pr_args(nodemask));
	va_end(args);

	cpuset_print_current_mems_allowed();

	dump_stack();
	warn_alloc_show_mem(gfp_mask, nodemask);
}
static inline struct page *
__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
			      unsigned int alloc_flags,
			      const struct alloc_context *ac)
{
	struct page *page;

	page = get_page_from_freelist(gfp_mask, order,
			alloc_flags|ALLOC_CPUSET, ac);
	/*
	 * fallback to ignore cpuset restriction if our nodes
	 * are depleted
	 */
	if (!page)
		page = get_page_from_freelist(gfp_mask, order,
				alloc_flags, ac);

	return page;
}

static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
	const struct alloc_context *ac, unsigned long *did_some_progress)
{
	struct oom_control oc = {
		.zonelist = ac->zonelist,
		.nodemask = ac->nodemask,
		.memcg = NULL,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	struct page *page;

	*did_some_progress = 0;

	/*
	 * Acquire the oom lock. If that fails, somebody else is
	 * making progress for us.
	 */
	if (!mutex_trylock(&oom_lock)) {
		*did_some_progress = 1;
		schedule_timeout_uninterruptible(1);
		return NULL;
	}

	/*
	 * Go through the zonelist yet one more time, keep very high watermark
	 * here, this is only to catch a parallel oom killing, we must fail if
	 * we're still under heavy pressure. But make sure that this reclaim
	 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
	 * allocation which will never fail due to oom_lock already held.
	 */
	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
				      ~__GFP_DIRECT_RECLAIM, order,
				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
	if (page)
		goto out;

	/* Coredumps can quickly deplete all memory reserves */
	if (current->flags & PF_DUMPCORE)
		goto out;
	/* The OOM killer will not help higher order allocs */
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		goto out;
	/*
	 * We have already exhausted all our reclaim opportunities without any
	 * success so it is time to admit defeat. We will skip the OOM killer
	 * because it is very likely that the caller has a more reasonable
	 * fallback than shooting a random task.
	 */
	if (gfp_mask & __GFP_RETRY_MAYFAIL)
		goto out;
	/* The OOM killer does not needlessly kill tasks for lowmem */
	if (ac->high_zoneidx < ZONE_NORMAL)
		goto out;
	if (pm_suspended_storage())
		goto out;
	/*
	 * XXX: GFP_NOFS allocations should rather fail than rely on
	 * other request to make a forward progress.
	 * We are in an unfortunate situation where out_of_memory cannot
	 * do much for this context but let's try it to at least get
	 * access to memory reserved if the current task is killed (see
	 * out_of_memory). Once filesystems are ready to handle allocation
	 * failures more gracefully we should just bail out here.
	 */

	/* The OOM killer may not free memory on a specific node */
	if (gfp_mask & __GFP_THISNODE)
		goto out;

	/* Exhausted what can be done so it's blame time */
	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
		*did_some_progress = 1;

		/*
		 * Help non-failing allocations by giving them access to memory
		 * reserves
		 */
		if (gfp_mask & __GFP_NOFAIL)
			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
					ALLOC_NO_WATERMARKS, ac);
	}
out:
	mutex_unlock(&oom_lock);
	return page;
}
/*
 * Maximum number of compaction retries with progress before the OOM
 * killer is considered the only way to move forward.
 */
#define MAX_COMPACT_RETRIES 16
#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio, enum compact_result *compact_result)
{
	struct page *page;
	unsigned int noreclaim_flag;

	if (!order)
		return NULL;

	noreclaim_flag = memalloc_noreclaim_save();
	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
									prio);
	memalloc_noreclaim_restore(noreclaim_flag);

	if (*compact_result <= COMPACT_INACTIVE)
		return NULL;

	/*
	 * At least in one zone compaction wasn't deferred or skipped, so let's
	 * count a compaction stall
	 */
	count_vm_event(COMPACTSTALL);

	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);

	if (page) {
		struct zone *zone = page_zone(page);

		zone->compact_blockskip_flush = false;
		compaction_defer_reset(zone, order, true);
		count_vm_event(COMPACTSUCCESS);
		return page;
	}

	/*
	 * It's bad if compaction run occurs and fails. The most likely reason
	 * is that pages exist, but not enough to satisfy watermarks.
	 */
	count_vm_event(COMPACTFAIL);

	cond_resched();

	return NULL;
}
static inline bool
should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
		     enum compact_result compact_result,
		     enum compact_priority *compact_priority,
		     int *compaction_retries)
{
	int max_retries = MAX_COMPACT_RETRIES;
	int min_priority;
	bool ret = false;
	int retries = *compaction_retries;
	enum compact_priority priority = *compact_priority;

	if (!order)
		return false;

	if (compaction_made_progress(compact_result))
		(*compaction_retries)++;

	/*
	 * compaction considers all the zones as desperately out of memory
	 * so it doesn't really make much sense to retry except when the
	 * failure could be caused by insufficient priority
	 */
	if (compaction_failed(compact_result))
		goto check_priority;

	/*
	 * make sure the compaction wasn't deferred or didn't bail out early
	 * due to lock contention before we declare that we should give up.
	 * But do not retry if the given zonelist is not suitable for
	 * compaction.
	 */
	if (compaction_withdrawn(compact_result)) {
		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
		goto out;
	}

	/*
	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
	 * costly ones because they are de facto nofail and invoke OOM
	 * killer to move on while costly can fail and users are ready
	 * to cope with that. 1/4 retries is rather arbitrary but we
	 * would need much more detailed feedback from compaction to
	 * make a better decision.
	 */
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		max_retries /= 4;
	if (*compaction_retries <= max_retries) {
		ret = true;
		goto out;
	}

	/*
	 * Make sure there are attempts at the highest priority if we exhausted
	 * all retries or failed at the lower priorities.
	 */
check_priority:
	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;

	if (*compact_priority > min_priority) {
		(*compact_priority)--;
		*compaction_retries = 0;
		ret = true;
	}
out:
	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
	return ret;
}
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio, enum compact_result *compact_result)
{
	*compact_result = COMPACT_SKIPPED;
	return NULL;
}

static inline bool
should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
		     enum compact_result compact_result,
		     enum compact_priority *compact_priority,
		     int *compaction_retries)
{
	struct zone *zone;
	struct zoneref *z;

	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
		return false;

	/*
	 * There are setups with compaction disabled which would prefer to loop
	 * inside the allocator rather than hit the oom killer prematurely.
	 * Let's give them a good hope and keep retrying while the order-0
	 * watermarks are OK.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
					ac->nodemask) {
		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
					ac_classzone_idx(ac), alloc_flags))
			return true;
	}
	return false;
}
#endif /* CONFIG_COMPACTION */
#ifdef CONFIG_LOCKDEP
struct lockdep_map __fs_reclaim_map =
	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);

static bool __need_fs_reclaim(gfp_t gfp_mask)
{
	gfp_mask = current_gfp_context(gfp_mask);

	/* no reclaim without waiting on it */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;

	/* this guy won't enter reclaim */
	if (current->flags & PF_MEMALLOC)
		return false;

	/* We're only interested in __GFP_FS allocations for now */
	if (!(gfp_mask & __GFP_FS))
		return false;

	if (gfp_mask & __GFP_NOLOCKDEP)
		return false;

	return true;
}

void fs_reclaim_acquire(gfp_t gfp_mask)
{
	if (__need_fs_reclaim(gfp_mask))
		lock_map_acquire(&__fs_reclaim_map);
}
EXPORT_SYMBOL_GPL(fs_reclaim_acquire);

void fs_reclaim_release(gfp_t gfp_mask)
{
	if (__need_fs_reclaim(gfp_mask))
		lock_map_release(&__fs_reclaim_map);
}
EXPORT_SYMBOL_GPL(fs_reclaim_release);
#endif
/* Perform direct synchronous page reclaim */
static int
__perform_reclaim(gfp_t gfp_mask, unsigned int order,
					const struct alloc_context *ac)
{
	struct reclaim_state reclaim_state;
	int progress;
	unsigned int noreclaim_flag;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	noreclaim_flag = memalloc_noreclaim_save();
	fs_reclaim_acquire(gfp_mask);
	reclaim_state.reclaimed_slab = 0;
	current->reclaim_state = &reclaim_state;

	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
								ac->nodemask);

	current->reclaim_state = NULL;
	fs_reclaim_release(gfp_mask);
	memalloc_noreclaim_restore(noreclaim_flag);

	cond_resched();

	return progress;
}
/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		unsigned long *did_some_progress)
{
	struct page *page = NULL;
	bool drained = false;

	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
	if (unlikely(!(*did_some_progress)))
		return NULL;

retry:
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);

	/*
	 * If an allocation failed after direct reclaim, it could be because
	 * pages are pinned on the per-cpu lists or in high alloc reserves.
	 * Shrink them and try again.
	 */
	if (!page && !drained) {
		unreserve_highatomic_pageblock(ac, false);
		drain_all_pages(NULL);
		drained = true;
		goto retry;
	}

	return page;
}
static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
			     const struct alloc_context *ac)
{
	struct zoneref *z;
	struct zone *zone;
	pg_data_t *last_pgdat = NULL;
	enum zone_type high_zoneidx = ac->high_zoneidx;

	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, high_zoneidx,
					ac->nodemask) {
		if (last_pgdat != zone->zone_pgdat)
			wakeup_kswapd(zone, gfp_mask, order, high_zoneidx);
		last_pgdat = zone->zone_pgdat;
	}
}
static inline unsigned int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;

	/* __GFP_HIGH is assumed to be the same as ALLOC_HIGH to save a branch. */
	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory. GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (__force int) (gfp_mask & __GFP_HIGH);

	if (gfp_mask & __GFP_ATOMIC) {
		/*
		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
		 * if it can't schedule.
		 */
		if (!(gfp_mask & __GFP_NOMEMALLOC))
			alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
		 * comment for __cpuset_node_allowed().
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(current)) && !in_interrupt())
		alloc_flags |= ALLOC_HARDER;

#ifdef CONFIG_CMA
	if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	return alloc_flags;
}
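/*
 * Worked example for the mapping above: a GFP_ATOMIC request carries
 * __GFP_HIGH, __GFP_ATOMIC and __GFP_KSWAPD_RECLAIM. __GFP_HIGH maps
 * directly to ALLOC_HIGH via the BUILD_BUG_ON invariant, and __GFP_ATOMIC
 * (absent __GFP_NOMEMALLOC) adds ALLOC_HARDER and clears ALLOC_CPUSET, so
 * such a request leaves this function with
 * ALLOC_WMARK_MIN | ALLOC_HIGH | ALLOC_HARDER.
 */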
static bool oom_reserves_allowed(struct task_struct *tsk)
{
	if (!tsk_is_oom_victim(tsk))
		return false;

	/*
	 * !MMU doesn't have oom reaper so give access to memory reserves
	 * only to the thread with TIF_MEMDIE set
	 */
	if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
		return false;

	return true;
}

/*
 * Distinguish requests which really need access to full memory
 * reserves from oom victims which can live with a portion of it
 */
static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
{
	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
		return 0;
	if (gfp_mask & __GFP_MEMALLOC)
		return ALLOC_NO_WATERMARKS;
	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
		return ALLOC_NO_WATERMARKS;
	if (!in_interrupt()) {
		if (current->flags & PF_MEMALLOC)
			return ALLOC_NO_WATERMARKS;
		else if (oom_reserves_allowed(current))
			return ALLOC_OOM;
	}

	return 0;
}

bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
	return !!__gfp_pfmemalloc_flags(gfp_mask);
}
/*
 * Checks whether it makes sense to retry the reclaim to make a forward progress
 * for the given allocation request.
 *
 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
 * without success, or when we couldn't even meet the watermark if we
 * reclaimed all remaining pages on the LRU lists.
 *
 * Returns true if a retry is viable or false to enter the oom path.
 */
static inline bool
should_reclaim_retry(gfp_t gfp_mask, unsigned order,
		     struct alloc_context *ac, int alloc_flags,
		     bool did_some_progress, int *no_progress_loops)
{
	struct zone *zone;
	struct zoneref *z;

	/*
	 * Costly allocations might have made progress but this doesn't mean
	 * their order will become available due to high fragmentation so
	 * always increment the no progress counter for them
	 */
	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
		*no_progress_loops = 0;
	else
		(*no_progress_loops)++;

	/*
	 * Make sure we converge to OOM if we cannot make any progress
	 * several times in a row.
	 */
	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
		/* Before OOM, exhaust highatomic_reserve */
		return unreserve_highatomic_pageblock(ac, true);
	}

	/*
	 * Keep reclaiming pages while there is a chance this will lead
	 * somewhere. If none of the target zones can satisfy our allocation
	 * request even if all reclaimable pages are considered then we are
	 * screwed and have to go OOM.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
					ac->nodemask) {
		unsigned long available;
		unsigned long reclaimable;
		unsigned long min_wmark = min_wmark_pages(zone);
		bool wmark;

		available = reclaimable = zone_reclaimable_pages(zone);
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);

		/*
		 * Would the allocation succeed if we reclaimed all
		 * reclaimable pages?
		 */
		wmark = __zone_watermark_ok(zone, order, min_wmark,
				ac_classzone_idx(ac), alloc_flags, available);
		trace_reclaim_retry_zone(z, order, reclaimable,
				available, min_wmark, *no_progress_loops, wmark);
		if (wmark) {
			/*
			 * If we didn't make any progress and have a lot of
			 * dirty + writeback pages then we should wait for
			 * an IO to complete to slow down the reclaim and
			 * prevent premature OOM
			 */
			if (!did_some_progress) {
				unsigned long write_pending;

				write_pending = zone_page_state_snapshot(zone,
							NR_ZONE_WRITE_PENDING);

				if (2 * write_pending > reclaimable) {
					congestion_wait(BLK_RW_ASYNC, HZ/10);
					return true;
				}
			}

			/*
			 * Memory allocation/reclaim might be called from a WQ
			 * context and the current implementation of the WQ
			 * concurrency control doesn't recognize that
			 * a particular WQ is congested if the worker thread is
			 * looping without ever sleeping. Therefore we have to
			 * do a short sleep here rather than calling
			 * cond_resched().
			 */
			if (current->flags & PF_WQ_WORKER)
				schedule_timeout_uninterruptible(1);
			else
				cond_resched();

			return true;
		}
	}

	return false;
}
static inline bool
check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
{
	/*
	 * It's possible that cpuset's mems_allowed and the nodemask from
	 * mempolicy don't intersect. This should be normally dealt with by
	 * policy_nodemask(), but it's possible to race with cpuset update in
	 * such a way the check therein was true, and then it became false
	 * before we got our cpuset_mems_cookie here.
	 * This assumes that for all allocations, ac->nodemask can come only
	 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
	 * when it does not intersect with the cpuset restrictions) or the
	 * caller can deal with a violated nodemask.
	 */
	if (cpusets_enabled() && ac->nodemask &&
			!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
		ac->nodemask = NULL;
		return true;
	}

	/*
	 * When updating a task's mems_allowed or mempolicy nodemask, it is
	 * possible to race with parallel threads in such a way that our
	 * allocation can fail while the mask is being updated. If we are about
	 * to fail, check if the cpuset changed during allocation and if so,
	 * retry.
	 */
	if (read_mems_allowed_retry(cpuset_mems_cookie))
		return true;

	return false;
}
  3477. static inline struct page *
  3478. __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
  3479. struct alloc_context *ac)
  3480. {
  3481. bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
  3482. const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
  3483. struct page *page = NULL;
  3484. unsigned int alloc_flags;
  3485. unsigned long did_some_progress;
  3486. enum compact_priority compact_priority;
  3487. enum compact_result compact_result;
  3488. int compaction_retries;
  3489. int no_progress_loops;
  3490. unsigned int cpuset_mems_cookie;
  3491. int reserve_flags;
  3492. /*
  3493. * In the slowpath, we sanity check order to avoid ever trying to
  3494. * reclaim >= MAX_ORDER areas which will never succeed. Callers may
  3495. * be using allocators in order of preference for an area that is
  3496. * too large.
  3497. */
  3498. if (order >= MAX_ORDER) {
  3499. WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
  3500. return NULL;
  3501. }
  3502. /*
  3503. * We also sanity check to catch abuse of atomic reserves being used by
  3504. * callers that are not in atomic context.
  3505. */
  3506. if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
  3507. (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
  3508. gfp_mask &= ~__GFP_ATOMIC;
  3509. retry_cpuset:
  3510. compaction_retries = 0;
  3511. no_progress_loops = 0;
  3512. compact_priority = DEF_COMPACT_PRIORITY;
  3513. cpuset_mems_cookie = read_mems_allowed_begin();
  3514. /*
  3515. * The fast path uses conservative alloc_flags to succeed only until
  3516. * kswapd needs to be woken up, and to avoid the cost of setting up
  3517. * alloc_flags precisely. So we do that now.
  3518. */
  3519. alloc_flags = gfp_to_alloc_flags(gfp_mask);
  3520. /*
  3521. * We need to recalculate the starting point for the zonelist iterator
  3522. * because we might have used different nodemask in the fast path, or
  3523. * there was a cpuset modification and we are retrying - otherwise we
  3524. * could end up iterating over non-eligible zones endlessly.
  3525. */
  3526. ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
  3527. ac->high_zoneidx, ac->nodemask);
  3528. if (!ac->preferred_zoneref->zone)
  3529. goto nopage;
  3530. if (gfp_mask & __GFP_KSWAPD_RECLAIM)
  3531. wake_all_kswapds(order, gfp_mask, ac);
  3532. /*
  3533. * The adjusted alloc_flags might result in immediate success, so try
  3534. * that first
  3535. */
  3536. page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
  3537. if (page)
  3538. goto got_pg;
  3539. /*
  3540. * For costly allocations, try direct compaction first, as it's likely
  3541. * that we have enough base pages and don't need to reclaim. For non-
  3542. * movable high-order allocations, do that as well, as compaction will
  3543. * try prevent permanent fragmentation by migrating from blocks of the
  3544. * same migratetype.
  3545. * Don't try this for allocations that are allowed to ignore
  3546. * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
  3547. */
  3548. if (can_direct_reclaim &&
  3549. (costly_order ||
  3550. (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
  3551. && !gfp_pfmemalloc_allowed(gfp_mask)) {
  3552. page = __alloc_pages_direct_compact(gfp_mask, order,
  3553. alloc_flags, ac,
  3554. INIT_COMPACT_PRIORITY,
  3555. &compact_result);
  3556. if (page)
  3557. goto got_pg;
  3558. /*
  3559. * Checks for costly allocations with __GFP_NORETRY, which
  3560. * includes THP page fault allocations
  3561. */
  3562. if (costly_order && (gfp_mask & __GFP_NORETRY)) {
  3563. /*
  3564. * If compaction is deferred for high-order allocations,
  3565. * it is because sync compaction recently failed. If
  3566. * this is the case and the caller requested a THP
  3567. * allocation, we do not want to heavily disrupt the
  3568. * system, so we fail the allocation instead of entering
  3569. * direct reclaim.
  3570. */
  3571. if (compact_result == COMPACT_DEFERRED)
  3572. goto nopage;
  3573. /*
  3574. * Looks like reclaim/compaction is worth trying, but
  3575. * sync compaction could be very expensive, so keep
  3576. * using async compaction.
  3577. */
  3578. compact_priority = INIT_COMPACT_PRIORITY;
  3579. }
  3580. }
retry:
	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
		wake_all_kswapds(order, gfp_mask, ac);

	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
	if (reserve_flags)
		alloc_flags = reserve_flags;

	/*
	 * Reset the zonelist iterators if memory policies can be ignored.
	 * These allocations are high priority and system rather than user
	 * oriented.
	 */
	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
		ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
					ac->high_zoneidx, ac->nodemask);
	}

	/* Attempt with potentially adjusted zonelist and alloc_flags */
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
	if (page)
		goto got_pg;

	/* Caller is not willing to reclaim, we can't balance anything */
	if (!can_direct_reclaim)
		goto nopage;

	/* Avoid recursion of direct reclaim */
	if (current->flags & PF_MEMALLOC)
		goto nopage;

	/* Try direct reclaim and then allocating */
	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
							&did_some_progress);
	if (page)
		goto got_pg;

	/* Try direct compaction and then allocating */
	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
					compact_priority, &compact_result);
	if (page)
		goto got_pg;

	/* Do not loop if specifically requested */
	if (gfp_mask & __GFP_NORETRY)
		goto nopage;

	/*
	 * Do not retry costly high order allocations unless they are
	 * __GFP_RETRY_MAYFAIL
	 */
	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
		goto nopage;

	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
				 did_some_progress > 0, &no_progress_loops))
		goto retry;

	/*
	 * It doesn't make any sense to retry the compaction if order-0
	 * reclaim is not able to make any progress, because the current
	 * implementation of compaction depends on a sufficient amount
	 * of free memory (see __compaction_suitable)
	 */
	if (did_some_progress > 0 &&
			should_compact_retry(ac, order, alloc_flags,
				compact_result, &compact_priority,
				&compaction_retries))
		goto retry;

	/* Deal with possible cpuset update races before we start OOM killing */
	if (check_retry_cpuset(cpuset_mems_cookie, ac))
		goto retry_cpuset;

	/* Reclaim has failed us, start killing things */
	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
	if (page)
		goto got_pg;

	/* Avoid allocations with no watermarks from looping endlessly */
	if (tsk_is_oom_victim(current) &&
	    (alloc_flags == ALLOC_OOM ||
	     (gfp_mask & __GFP_NOMEMALLOC)))
		goto nopage;

	/* Retry as long as the OOM killer is making progress */
	if (did_some_progress) {
		no_progress_loops = 0;
		goto retry;
	}

nopage:
	/* Deal with possible cpuset update races before we fail */
	if (check_retry_cpuset(cpuset_mems_cookie, ac))
		goto retry_cpuset;

	/*
	 * Make sure that a __GFP_NOFAIL request doesn't leak out and make sure
	 * we always retry
	 */
	if (gfp_mask & __GFP_NOFAIL) {
		/*
		 * All existing users of __GFP_NOFAIL are blockable, so warn
		 * about any new users that actually require GFP_NOWAIT
		 */
		if (WARN_ON_ONCE(!can_direct_reclaim))
			goto fail;

		/*
		 * A PF_MEMALLOC request from this context is rather bizarre
		 * because we cannot reclaim anything and can only loop waiting
		 * for somebody else to do some work for us
		 */
		WARN_ON_ONCE(current->flags & PF_MEMALLOC);

		/*
		 * Non-failing costly orders are a hard requirement which we
		 * are not prepared for much, so warn about these users
		 * so that we can identify them and convert them to something
		 * else.
		 */
		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);

		/*
		 * Help non-failing allocations by giving them access to memory
		 * reserves, but do not use ALLOC_NO_WATERMARKS because this
		 * could deplete whole memory reserves, which would just make
		 * the situation worse
		 */
		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
		if (page)
			goto got_pg;

		cond_resched();
		goto retry;
	}
fail:
	warn_alloc(gfp_mask, ac->nodemask,
			"page allocation failure: order:%u", order);
got_pg:
	return page;
}
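/*
 * Illustration (hypothetical caller, not part of this file): how the gfp
 * bits handled in the slow path above change behaviour for a costly
 * (order > PAGE_ALLOC_COSTLY_ORDER) request. With __GFP_NORETRY the
 * allocator bails out after one reclaim/compaction cycle; with
 * __GFP_RETRY_MAYFAIL it retries harder but may still return NULL rather
 * than invoke the OOM killer. The function name is illustrative only.
 */
static struct page *example_costly_alloc(void)
{
	struct page *page;

	/* Cheap attempt first: fail fast instead of disrupting the system. */
	page = alloc_pages(GFP_KERNEL | __GFP_NORETRY, 4);
	if (page)
		return page;

	/* The caller really needs it: retry harder, but tolerate failure. */
	return alloc_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, 4);
}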
static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
		int preferred_nid, nodemask_t *nodemask,
		struct alloc_context *ac, gfp_t *alloc_mask,
		unsigned int *alloc_flags)
{
	ac->high_zoneidx = gfp_zone(gfp_mask);
	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
	ac->nodemask = nodemask;
	ac->migratetype = gfpflags_to_migratetype(gfp_mask);

	if (cpusets_enabled()) {
		*alloc_mask |= __GFP_HARDWALL;
		if (!ac->nodemask)
			ac->nodemask = &cpuset_current_mems_allowed;
		else
			*alloc_flags |= ALLOC_CPUSET;
	}

	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	if (should_fail_alloc_page(gfp_mask, order))
		return false;

	if (IS_ENABLED(CONFIG_CMA) && ac->migratetype == MIGRATE_MOVABLE)
		*alloc_flags |= ALLOC_CMA;

	return true;
}

/* Determine whether to spread dirty pages and what the first usable zone is */
static inline void finalise_ac(gfp_t gfp_mask,
		unsigned int order, struct alloc_context *ac)
{
	/* Dirty zone balancing is only done in the fast path */
	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);

	/*
	 * The preferred zone is used for statistics but crucially it is
	 * also used as the starting point for the zonelist iterator. It
	 * may get reset for allocations that ignore memory policies.
	 */
	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
					ac->high_zoneidx, ac->nodemask);
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
							nodemask_t *nodemask)
{
	struct page *page;
	unsigned int alloc_flags = ALLOC_WMARK_LOW;
	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
	struct alloc_context ac = { };

	gfp_mask &= gfp_allowed_mask;
	alloc_mask = gfp_mask;
	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
		return NULL;

	finalise_ac(gfp_mask, order, &ac);

	/* First allocation attempt */
	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
	if (likely(page))
		goto out;

	/*
	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
	 * and GFP_NOIO, which have to be inherited by all allocation requests
	 * from a particular context which has been marked by
	 * memalloc_no{fs,io}_{save,restore}.
	 */
	alloc_mask = current_gfp_context(gfp_mask);
	ac.spread_dirty_pages = false;

	/*
	 * Restore the original nodemask if it was potentially replaced with
	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
	 */
	if (unlikely(ac.nodemask != nodemask))
		ac.nodemask = nodemask;

	page = __alloc_pages_slowpath(alloc_mask, order, &ac);

out:
	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
	    unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) {
		__free_pages(page, order);
		page = NULL;
	}

	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);

	return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);
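/*
 * Note (sketch, not part of the original file): the familiar entry points
 * funnel into __alloc_pages_nodemask(). For example, on a !CONFIG_NUMA
 * build, alloc_pages(gfp, order) ultimately resolves to
 * __alloc_pages_nodemask(gfp, order, numa_node_id(), NULL).
 */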
/*
 * Common helper functions.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	/*
	 * __get_free_pages() returns a virtual address, which cannot represent
	 * a highmem page
	 */
	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);

void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		if (order == 0)
			free_unref_page(page);
		else
			__free_pages_ok(page, order);
	}
}
EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}
EXPORT_SYMBOL(free_pages);
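/*
 * Usage sketch (hypothetical): the virtual-address helpers above pair up
 * as __get_free_pages()/free_pages() and get_zeroed_page()/free_page(),
 * where free_page(addr) expands to free_pages(addr, 0).
 */
static int example_scratch_page(void)
{
	unsigned long addr = get_zeroed_page(GFP_KERNEL);

	if (!addr)
		return -ENOMEM;
	/* ... use the zeroed PAGE_SIZE buffer at (void *)addr ... */
	free_pages(addr, 0);
	return 0;
}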
/*
 * Page Fragment:
 *  An arbitrary-length arbitrary-offset area of memory which resides
 *  within a 0 or higher order page. Multiple fragments within that page
 *  are individually refcounted, in the page's reference counter.
 *
 * The page_frag functions below provide a simple allocation framework for
 * page fragments. This is used by the network stack and network device
 * drivers to provide a backing region of memory for use as either an
 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
 */
static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
					     gfp_t gfp_mask)
{
	struct page *page = NULL;
	gfp_t gfp = gfp_mask;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
		    __GFP_NOMEMALLOC;
	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
				PAGE_FRAG_CACHE_MAX_ORDER);
	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
#endif
	if (unlikely(!page))
		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);

	nc->va = page ? page_address(page) : NULL;

	return page;
}

void __page_frag_cache_drain(struct page *page, unsigned int count)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);

	if (page_ref_sub_and_test(page, count)) {
		unsigned int order = compound_order(page);

		if (order == 0)
			free_unref_page(page);
		else
			__free_pages_ok(page, order);
	}
}
EXPORT_SYMBOL(__page_frag_cache_drain);

void *page_frag_alloc(struct page_frag_cache *nc,
		      unsigned int fragsz, gfp_t gfp_mask)
{
	unsigned int size = PAGE_SIZE;
	struct page *page;
	int offset;

	if (unlikely(!nc->va)) {
refill:
		page = __page_frag_cache_refill(nc, gfp_mask);
		if (!page)
			return NULL;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		/* if size can vary, use it; else just use PAGE_SIZE */
		size = nc->size;
#endif
		/* Even if we own the page, we do not use atomic_set().
		 * This would break get_page_unless_zero() users.
		 */
		page_ref_add(page, size - 1);

		/* reset page count bias and offset to start of new frag */
		nc->pfmemalloc = page_is_pfmemalloc(page);
		nc->pagecnt_bias = size;
		nc->offset = size;
	}

	offset = nc->offset - fragsz;
	if (unlikely(offset < 0)) {
		page = virt_to_page(nc->va);

		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
			goto refill;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		/* if size can vary, use it; else just use PAGE_SIZE */
		size = nc->size;
#endif
		/* OK, page count is 0, we can safely set it */
		set_page_count(page, size);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = size;
		offset = size - fragsz;
	}

	nc->pagecnt_bias--;
	nc->offset = offset;

	return nc->va + offset;
}
EXPORT_SYMBOL(page_frag_alloc);

/*
 * Frees a page fragment allocated out of either a compound or order 0 page.
 */
void page_frag_free(void *addr)
{
	struct page *page = virt_to_head_page(addr);

	if (unlikely(put_page_testzero(page)))
		__free_pages_ok(page, compound_order(page));
}
EXPORT_SYMBOL(page_frag_free);
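/*
 * Usage sketch (hypothetical; the caller owns the cache, typically a
 * per-cpu structure): carve two small fragments out of one backing page
 * and release them individually. Each live fragment holds one page
 * reference via the bias mechanism above.
 */
static void example_frag_usage(struct page_frag_cache *nc)
{
	void *a = page_frag_alloc(nc, 256, GFP_ATOMIC);
	void *b = page_frag_alloc(nc, 512, GFP_ATOMIC);

	if (a)
		page_frag_free(a);
	if (b)
		page_frag_free(b);
}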
static void *make_alloc_exact(unsigned long addr, unsigned int order,
		size_t size)
{
	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}
	return (void *)addr;
}

/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request. alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	addr = __get_free_pages(gfp_mask, order);
	return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 *			   pages on a node.
 * @nid: the preferred node ID where memory should be allocated
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation
 *
 * Like alloc_pages_exact(), but tries to allocate on node nid first before
 * falling back.
 */
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	struct page *p = alloc_pages_node(nid, gfp_mask, order);

	if (!p)
		return NULL;
	return make_alloc_exact((unsigned long)page_address(p), order, size);
}

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long addr = (unsigned long)virt;
	unsigned long end = addr + PAGE_ALIGN(size);

	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(free_pages_exact);
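/*
 * Worked example (hypothetical, assuming PAGE_SIZE == 4 KB):
 * alloc_pages_exact(10 * 1024, GFP_KERNEL) grabs an order-2 block
 * (4 pages, 16 KB), splits it, keeps the PAGE_ALIGN(10 KB) = 3 leading
 * pages and frees the fourth, so only 12 KB remain allocated. The same
 * size must be passed back to free_pages_exact().
 */
static int example_exact_alloc(void)
{
	void *buf = alloc_pages_exact(10 * 1024, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	/* ... use the 10 KB physically-contiguous buffer ... */
	free_pages_exact(buf, 10 * 1024);
	return 0;
}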
/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * nr_free_zone_pages() counts the number of pages which are beyond the
 * high watermark within all zones at or below a given zone index. For each
 * zone, the number of pages is calculated as:
 *
 *     nr_free_zone_pages = managed_pages - high_pages
 */
static unsigned long nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;

	/* Just pick one node, since fallback list is circular */
	unsigned long sum = 0;

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

	for_each_zone_zonelist(zone, z, zonelist, offset) {
		unsigned long size = zone->managed_pages;
		unsigned long high = high_wmark_pages(zone);

		if (size > high)
			sum += size - high;
	}

	return sum;
}

/**
 * nr_free_buffer_pages - count number of pages beyond high watermark
 *
 * nr_free_buffer_pages() counts the number of pages which are beyond the high
 * watermark within ZONE_DMA and ZONE_NORMAL.
 */
unsigned long nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);

/**
 * nr_free_pagecache_pages - count number of pages beyond high watermark
 *
 * nr_free_pagecache_pages() counts the number of pages which are beyond the
 * high watermark within all zones.
 */
unsigned long nr_free_pagecache_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
}
static inline void show_node(struct zone *zone)
{
	if (IS_ENABLED(CONFIG_NUMA))
		printk("Node %d ", zone_to_nid(zone));
}

long si_mem_available(void)
{
	long available;
	unsigned long pagecache;
	unsigned long wmark_low = 0;
	unsigned long pages[NR_LRU_LISTS];
	struct zone *zone;
	int lru;

	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);

	for_each_zone(zone)
		wmark_low += zone->watermark[WMARK_LOW];

	/*
	 * Estimate the amount of memory available for userspace allocations,
	 * without causing swapping.
	 */
	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;

	/*
	 * Not all the page cache can be freed, otherwise the system will
	 * start swapping. Assume at least half of the page cache, or the
	 * low watermark worth of cache, needs to stay.
	 */
	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;

	/*
	 * Part of the reclaimable slab consists of items that are in use,
	 * and cannot be freed. Cap this estimate at the low watermark.
	 */
	available += global_node_page_state(NR_SLAB_RECLAIMABLE) -
		     min(global_node_page_state(NR_SLAB_RECLAIMABLE) / 2,
			 wmark_low);

	/*
	 * Part of the kernel memory, which can be released under memory
	 * pressure.
	 */
	available += global_node_page_state(NR_INDIRECTLY_RECLAIMABLE_BYTES) >>
		     PAGE_SHIFT;

	if (available < 0)
		available = 0;
	return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);
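/*
 * Usage sketch (hypothetical): si_mem_available() reports pages; callers
 * usually scale the result, e.g. to kilobytes in the same way as the K()
 * macro defined further below.
 */
static unsigned long example_mem_available_kb(void)
{
	return (unsigned long)si_mem_available() << (PAGE_SHIFT - 10);
}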
void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages;
	val->sharedram = global_node_page_state(NR_SHMEM);
	val->freeram = global_zone_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages;
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}
EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	int zone_type;		/* needs to be signed */
	unsigned long managed_pages = 0;
	unsigned long managed_highpages = 0;
	unsigned long free_highpages = 0;
	pg_data_t *pgdat = NODE_DATA(nid);

	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		managed_pages += pgdat->node_zones[zone_type].managed_pages;
	val->totalram = managed_pages;
	val->sharedram = node_page_state(pgdat, NR_SHMEM);
	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		if (is_highmem(zone)) {
			managed_highpages += zone->managed_pages;
			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
		}
	}
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#else
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif
/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{
	if (!(flags & SHOW_MEM_FILTER_NODES))
		return false;

	/*
	 * No nodemask means the implicit memory NUMA policy. Do not bother
	 * with the synchronization (read_mems_allowed_begin) because we do
	 * not have to be precise here.
	 */
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;

	return !node_isset(nid, *nodemask);
}

#define K(x) ((x) << (PAGE_SHIFT-10))

static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]	= 'U',
		[MIGRATE_MOVABLE]	= 'M',
		[MIGRATE_RECLAIMABLE]	= 'E',
		[MIGRATE_HIGHATOMIC]	= 'H',
#ifdef CONFIG_CMA
		[MIGRATE_CMA]		= 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
		[MIGRATE_ISOLATE]	= 'I',
#endif
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}

	*p = '\0';
	printk(KERN_CONT "(%s) ", tmp);
}
/*
 * Show free area list (used inside shift_scroll-lock stuff).
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
void show_free_areas(unsigned int filter, nodemask_t *nodemask)
{
	unsigned long free_pcp = 0;
	int cpu;
	struct zone *zone;
	pg_data_t *pgdat;

	for_each_populated_zone(zone) {
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
	}

	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu dirty:%lu writeback:%lu unstable:%lu\n"
		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
		" free:%lu free_pcp:%lu free_cma:%lu\n",
		global_node_page_state(NR_ACTIVE_ANON),
		global_node_page_state(NR_INACTIVE_ANON),
		global_node_page_state(NR_ISOLATED_ANON),
		global_node_page_state(NR_ACTIVE_FILE),
		global_node_page_state(NR_INACTIVE_FILE),
		global_node_page_state(NR_ISOLATED_FILE),
		global_node_page_state(NR_UNEVICTABLE),
		global_node_page_state(NR_FILE_DIRTY),
		global_node_page_state(NR_WRITEBACK),
		global_node_page_state(NR_UNSTABLE_NFS),
		global_node_page_state(NR_SLAB_RECLAIMABLE),
		global_node_page_state(NR_SLAB_UNRECLAIMABLE),
		global_node_page_state(NR_FILE_MAPPED),
		global_node_page_state(NR_SHMEM),
		global_zone_page_state(NR_PAGETABLE),
		global_zone_page_state(NR_BOUNCE),
		global_zone_page_state(NR_FREE_PAGES),
		free_pcp,
		global_zone_page_state(NR_FREE_CMA_PAGES));

	for_each_online_pgdat(pgdat) {
		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
			continue;

		printk("Node %d"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" mapped:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			" shmem_thp: %lukB"
			" shmem_pmdmapped: %lukB"
			" anon_thp: %lukB"
#endif
			" writeback_tmp:%lukB"
			" unstable:%lukB"
			" all_unreclaimable? %s"
			"\n",
			pgdat->node_id,
			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			K(node_page_state(pgdat, NR_UNEVICTABLE)),
			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
			K(node_page_state(pgdat, NR_FILE_MAPPED)),
			K(node_page_state(pgdat, NR_FILE_DIRTY)),
			K(node_page_state(pgdat, NR_WRITEBACK)),
			K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
					* HPAGE_PMD_NR),
			K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
#endif
			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
				"yes" : "no");
	}

	for_each_populated_zone(zone) {
		int i;

		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		free_pcp = 0;
		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;

		show_node(zone);
		printk(KERN_CONT
			"%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" writepending:%lukB"
			" present:%lukB"
			" managed:%lukB"
			" mlocked:%lukB"
			" kernel_stack:%lukB"
			" pagetables:%lukB"
			" bounce:%lukB"
			" free_pcp:%lukB"
			" local_pcp:%ukB"
			" free_cma:%lukB"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
			K(zone->present_pages),
			K(zone->managed_pages),
			K(zone_page_state(zone, NR_MLOCK)),
			zone_page_state(zone, NR_KERNEL_STACK_KB),
			K(zone_page_state(zone, NR_PAGETABLE)),
			K(zone_page_state(zone, NR_BOUNCE)),
			K(free_pcp),
			K(this_cpu_read(zone->pageset->pcp.count)),
			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
		printk(KERN_CONT "\n");
	}

	for_each_populated_zone(zone) {
		unsigned int order;
		unsigned long nr[MAX_ORDER], flags, total = 0;
		unsigned char types[MAX_ORDER];

		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;
		show_node(zone);
		printk(KERN_CONT "%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			struct free_area *area = &zone->free_area[order];
			int type;

			nr[order] = area->nr_free;
			total += nr[order] << order;

			types[order] = 0;
			for (type = 0; type < MIGRATE_TYPES; type++) {
				if (!list_empty(&area->free_list[type]))
					types[order] |= 1 << type;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			printk(KERN_CONT "%lu*%lukB ",
			       nr[order], K(1UL) << order);
			if (nr[order])
				show_migration_types(types[order]);
		}
		printk(KERN_CONT "= %lukB\n", K(total));
	}

	hugetlb_show_meminfo();

	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}
static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
	zoneref->zone = zone;
	zoneref->zone_idx = zone_idx(zone);
}

/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
{
	struct zone *zone;
	enum zone_type zone_type = MAX_NR_ZONES;
	int nr_zones = 0;

	do {
		zone_type--;
		zone = pgdat->node_zones + zone_type;
		if (managed_zone(zone)) {
			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
			check_highest_zone(zone_type);
		}
	} while (zone_type);

	return nr_zones;
}

#ifdef CONFIG_NUMA

static int __parse_numa_zonelist_order(char *s)
{
	/*
	 * We used to support different zonelist modes, but they turned
	 * out to be just not useful. Let's keep the warning in place
	 * in case somebody still uses the command line parameter, so that
	 * we do not fail it silently.
	 */
	if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
		pr_warn("Ignoring unsupported numa_zonelist_order value: %s\n", s);
		return -EINVAL;
	}
	return 0;
}

static __init int setup_numa_zonelist_order(char *s)
{
	if (!s)
		return 0;

	return __parse_numa_zonelist_order(s);
}
early_param("numa_zonelist_order", setup_numa_zonelist_order);

char numa_zonelist_order[] = "Node";

/*
 * sysctl handler for numa_zonelist_order
 */
int numa_zonelist_order_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *length,
		loff_t *ppos)
{
	char *str;
	int ret;

	if (!write)
		return proc_dostring(table, write, buffer, length, ppos);
	str = memdup_user_nul(buffer, 16);
	if (IS_ERR(str))
		return PTR_ERR(str);

	ret = __parse_numa_zonelist_order(str);
	kfree(str);
	return ret;
}
#define MAX_NODE_LOAD (nr_online_nodes)
static int node_load[MAX_NUMNODES];

/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list. The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 * It returns -1 if no node is found.
 */
static int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = NUMA_NO_NODE;
	const struct cpumask *tmp = cpumask_of_node(0);

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
		node_set(node, *used_node_mask);
		return node;
	}

	for_each_node_state(n, N_MEMORY) {

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Penalize nodes under us ("prefer the next node") */
		val += (n < node);

		/* Give preference to headless and unused nodes */
		tmp = cpumask_of_node(n);
		if (!cpumask_empty(tmp))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}
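/*
 * Worked example (illustrative numbers, with the default
 * PENALTY_FOR_NODE_WITH_CPUS of 1): for two candidate nodes above @node at
 * distances 20 and 40, both with CPUs and zero node_load, the scores are
 * 21 and 41 times (MAX_NODE_LOAD * MAX_NUMNODES), so the closer node wins;
 * node_load only breaks ties within one distance/penalty class.
 */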
/*
 * Build zonelists ordered by node and zones within node.
 * This results in maximum locality--normal zone overflows into local
 * DMA zone, if any--but risks exhausting DMA zone.
 */
static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
		unsigned nr_nodes)
{
	struct zoneref *zonerefs;
	int i;

	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;

	for (i = 0; i < nr_nodes; i++) {
		int nr_zones;

		pg_data_t *node = NODE_DATA(node_order[i]);

		nr_zones = build_zonerefs_node(node, zonerefs);
		zonerefs += nr_zones;
	}
	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}

/*
 * Build gfp_thisnode zonelists
 */
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
	struct zoneref *zonerefs;
	int nr_zones;

	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
	nr_zones = build_zonerefs_node(pgdat, zonerefs);
	zonerefs += nr_zones;
	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}

/*
 * Build zonelists ordered by zone and nodes within zones.
 * This results in conserving DMA zone[s] until all Normal memory is
 * exhausted, but results in overflowing to remote node while memory
 * may still exist in local DMA zone.
 */
static void build_zonelists(pg_data_t *pgdat)
{
	static int node_order[MAX_NUMNODES];
	int node, load, nr_nodes = 0;
	nodemask_t used_mask;
	int local_node, prev_node;

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = nr_online_nodes;
	prev_node = local_node;
	nodes_clear(used_mask);

	memset(node_order, 0, sizeof(node_order));
	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		/*
		 * We don't want to pressure a particular node.
		 * So we add a penalty to the first node in the same
		 * distance group to make the ordering round-robin.
		 */
		if (node_distance(local_node, node) !=
		    node_distance(local_node, prev_node))
			node_load[node] = load;

		node_order[nr_nodes++] = node;
		prev_node = node;
		load--;
	}

	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
	build_thisnode_zonelists(pgdat);
}
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * Return node id of node used for "local" allocations.
 * I.e., first node id of first zone in arg node's generic zonelist.
 * Used for initializing percpu 'numa_mem', which is used primarily
 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
 */
int local_memory_node(int node)
{
	struct zoneref *z;

	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
				   gfp_zone(GFP_KERNEL),
				   NULL);
	return z->zone->node;
}
#endif

static void setup_min_unmapped_ratio(void);
static void setup_min_slab_ratio(void);
#else	/* CONFIG_NUMA */

static void build_zonelists(pg_data_t *pgdat)
{
	int node, local_node;
	struct zoneref *zonerefs;
	int nr_zones;

	local_node = pgdat->node_id;

	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
	nr_zones = build_zonerefs_node(pgdat, zonerefs);
	zonerefs += nr_zones;

	/*
	 * Now we build the zonelist so that it contains the zones
	 * of all the other nodes.
	 * We don't want to pressure a particular node, so when
	 * building the zones for node N, we make sure that the
	 * zones coming right after the local ones are those from
	 * node N+1 (modulo N)
	 */
	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
		if (!node_online(node))
			continue;
		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
		zonerefs += nr_zones;
	}
	for (node = 0; node < local_node; node++) {
		if (!node_online(node))
			continue;
		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
		zonerefs += nr_zones;
	}

	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}
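/*
 * Example (illustrative): with three online nodes and local_node == 1,
 * the fallback zonelist built above visits the zones of node 1, then
 * node 2, then node 0.
 */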
#endif	/* CONFIG_NUMA */
/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch);
static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

static void __build_all_zonelists(void *data)
{
	int nid;
	int __maybe_unused cpu;
	pg_data_t *self = data;
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);

#ifdef CONFIG_NUMA
	memset(node_load, 0, sizeof(node_load));
#endif

	/*
	 * This node has been hot-added and no memory is yet present. So just
	 * building zonelists is fine - no need to touch other nodes.
	 */
	if (self && !node_online(self->node_id)) {
		build_zonelists(self);
	} else {
		for_each_online_node(nid) {
			pg_data_t *pgdat = NODE_DATA(nid);

			build_zonelists(pgdat);
		}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
		/*
		 * We now know the "local memory node" for each node--
		 * i.e., the node of the first zone in the generic zonelist.
		 * Set up numa_mem percpu variable for on-line cpus. During
		 * boot, only the boot cpu should be on-line; we'll init the
		 * secondary cpus' numa_mem as they come on-line. During
		 * node/memory hotplug, we'll fixup all on-line cpus.
		 */
		for_each_online_cpu(cpu)
			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
#endif
	}

	spin_unlock(&lock);
}

static noinline void __init
build_all_zonelists_init(void)
{
	int cpu;

	__build_all_zonelists(NULL);

	/*
	 * Initialize the boot_pagesets that are going to be used
	 * for bootstrapping processors. The real pagesets for
	 * each zone will be allocated later when the per cpu
	 * allocator is available.
	 *
	 * boot_pagesets are also used for bootstrapping offline
	 * cpus if the system is already booted, because the pagesets
	 * are needed to initialize allocators on a specific cpu too.
	 * E.g. the percpu allocator needs the page allocator, which
	 * needs the percpu allocator in order to allocate its pagesets
	 * (a chicken-egg dilemma).
	 */
	for_each_possible_cpu(cpu)
		setup_pageset(&per_cpu(boot_pageset, cpu), 0);

	mminit_verify_zonelist();
	cpuset_init_current_mems_allowed();
}
/*
 * build_all_zonelists_init() may only be used while
 * system_state == SYSTEM_BOOTING.
 *
 * __ref due to the call of the __init annotated helper
 * build_all_zonelists_init() [protected by SYSTEM_BOOTING].
 */
void __ref build_all_zonelists(pg_data_t *pgdat)
{
	if (system_state == SYSTEM_BOOTING) {
		build_all_zonelists_init();
	} else {
		__build_all_zonelists(pgdat);
		/* cpuset refresh routine should be here */
	}
	vm_total_pages = nr_free_pagecache_pages();
	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * more accurate, but expensive, to check per-zone. This check is
	 * made on memory hot-add so a system can start with mobility
	 * disabled and enable it later.
	 */
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
	else
		page_group_by_mobility_disabled = 0;

	pr_info("Built %i zonelists, mobility grouping %s.  Total pages: %ld\n",
		nr_online_nodes,
		page_group_by_mobility_disabled ? "off" : "on",
		vm_total_pages);
#ifdef CONFIG_NUMA
	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}
/*
 * Initially all pages are reserved - free ones are freed
 * up by free_all_bootmem() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 */
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn, enum memmap_context context,
		struct vmem_altmap *altmap)
{
	unsigned long end_pfn = start_pfn + size;
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long pfn;
	unsigned long nr_initialised = 0;
	struct page *page;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	struct memblock_region *r = NULL, *tmp;
#endif

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

	/*
	 * Honor reservation requested by the driver for this ZONE_DEVICE
	 * memory
	 */
	if (altmap && start_pfn == altmap->base_pfn)
		start_pfn += altmap->reserve;

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		/*
		 * There can be holes in boot-time mem_map[]s handed to this
		 * function. They do not exist on hotplugged memory.
		 */
		if (context != MEMMAP_EARLY)
			goto not_early;

		if (!early_pfn_valid(pfn))
			continue;
		if (!early_pfn_in_nid(pfn, nid))
			continue;
		if (!update_defer_init(pgdat, pfn, end_pfn, &nr_initialised))
			break;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		/*
		 * Check the memblock attribute given by firmware, which can
		 * affect kernel memory layout. If zone==ZONE_MOVABLE but
		 * memory is mirrored, it's an overlapped memmap init; skip it.
		 */
		if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
			if (!r || pfn >= memblock_region_memory_end_pfn(r)) {
				for_each_memblock(memory, tmp)
					if (pfn < memblock_region_memory_end_pfn(tmp))
						break;
				r = tmp;
			}
			if (pfn >= memblock_region_memory_base_pfn(r) &&
			    memblock_is_mirror(r)) {
				/* already initialized as NORMAL */
				pfn = memblock_region_memory_end_pfn(r);
				continue;
			}
		}
#endif

not_early:
		page = pfn_to_page(pfn);
		__init_single_page(page, pfn, zone, nid);
		if (context == MEMMAP_HOTPLUG)
			SetPageReserved(page);

		/*
		 * Mark the block movable so that blocks are reserved for
		 * movable at startup. This will force kernel allocations
		 * to reserve their blocks rather than leaking throughout
		 * the address space during boot when many long-lived
		 * kernel allocations are made.
		 *
		 * The bitmap is created for the zone's valid pfn range,
		 * but the memmap can be created for invalid pages (for
		 * alignment), so check here that we do not call
		 * set_pageblock_migratetype() against a pfn out of the zone.
		 *
		 * Please note that the MEMMAP_HOTPLUG path doesn't clear the
		 * memmap, because this is done early in sparse_add_one_section
		 */
		if (!(pfn & (pageblock_nr_pages - 1))) {
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
			cond_resched();
		}
	}
}
static void __meminit zone_init_free_lists(struct zone *zone)
{
	unsigned int order, t;
	for_each_migratetype_order(order, t) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		zone->free_area[order].nr_free = 0;
	}
}

#ifndef __HAVE_ARCH_MEMMAP_INIT
#define memmap_init(size, nid, zone, start_pfn) \
	memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY, NULL)
#endif

static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
	int batch;

	/*
	 * The per-cpu-pages pools are set to around 1000th of the
	 * size of the zone. But no more than 1/2 of a meg.
	 *
	 * OK, so we don't know how big the cache is. So guess.
	 */
	batch = zone->managed_pages / 1024;
	if (batch * PAGE_SIZE > 512 * 1024)
		batch = (512 * 1024) / PAGE_SIZE;
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
	batch = rounddown_pow_of_two(batch + batch/2) - 1;

	return batch;

#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions.
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks. As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
	return 0;
#endif
}
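/*
 * Worked example (assuming 4 KB pages): for a 1 GiB zone,
 * managed_pages / 1024 = 256 pages, capped at (512 KB / 4 KB) = 128,
 * divided by 4 gives 32, and rounddown_pow_of_two(32 + 16) - 1 = 31,
 * so the per-cpu batch ends up at 31 pages.
 */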
/*
 * pcp->high and pcp->batch values are related and dependent on one another:
 * ->batch must never be higher than ->high.
 * The following function updates them in a safe manner without read side
 * locking.
 *
 * Any new users of pcp->batch and pcp->high should ensure they can cope with
 * those fields changing asynchronously (according to the above rule).
 *
 * mutex_is_locked(&pcp_batch_high_lock) is required when calling this function
 * outside of boot time (or some other assurance that no concurrent updaters
 * exist).
 */
static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
		unsigned long batch)
{
	/* start with a fail safe value for batch */
	pcp->batch = 1;
	smp_wmb();

	/* Update high, then batch, in order */
	pcp->high = high;
	smp_wmb();

	pcp->batch = batch;
}

/* a companion to pageset_set_high() */
static void pageset_set_batch(struct per_cpu_pageset *p, unsigned long batch)
{
	pageset_update(&p->pcp, 6 * batch, max(1UL, 1 * batch));
}

static void pageset_init(struct per_cpu_pageset *p)
{
	struct per_cpu_pages *pcp;
	int migratetype;

	memset(p, 0, sizeof(*p));

	pcp = &p->pcp;
	pcp->count = 0;
	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
		INIT_LIST_HEAD(&pcp->lists[migratetype]);
}

static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
{
	pageset_init(p);
	pageset_set_batch(p, batch);
}

/*
 * pageset_set_high() sets the high water mark for hot per_cpu_pagelist
 * to the value high for the pageset p.
 */
static void pageset_set_high(struct per_cpu_pageset *p,
				unsigned long high)
{
	unsigned long batch = max(1UL, high / 4);
	if ((high / 4) > (PAGE_SHIFT * 8))
		batch = PAGE_SHIFT * 8;

	pageset_update(&p->pcp, high, batch);
}

static void pageset_set_high_and_batch(struct zone *zone,
				       struct per_cpu_pageset *pcp)
{
	if (percpu_pagelist_fraction)
		pageset_set_high(pcp,
			(zone->managed_pages /
				percpu_pagelist_fraction));
	else
		pageset_set_batch(pcp, zone_batchsize(zone));
}
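/*
 * Worked example (assuming 4 KB pages): with percpu_pagelist_fraction == 8
 * on a 1 GiB zone, high = 262144 / 8 = 32768 pages per cpu; high / 4 = 8192
 * exceeds PAGE_SHIFT * 8 = 96, so the batch is clamped to 96.
 */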
static void __meminit zone_pageset_init(struct zone *zone, int cpu)
{
	struct per_cpu_pageset *pcp = per_cpu_ptr(zone->pageset, cpu);

	pageset_init(pcp);
	pageset_set_high_and_batch(zone, pcp);
}

void __meminit setup_zone_pageset(struct zone *zone)
{
	int cpu;

	zone->pageset = alloc_percpu(struct per_cpu_pageset);
	for_each_possible_cpu(cpu)
		zone_pageset_init(zone, cpu);
}
/*
 * Allocate per cpu pagesets and initialize them.
 * Before this call only boot pagesets were available.
 */
void __init setup_per_cpu_pageset(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;

	for_each_populated_zone(zone)
		setup_zone_pageset(zone);

	for_each_online_pgdat(pgdat)
		pgdat->per_cpu_nodestats =
			alloc_percpu(struct per_cpu_nodestat);
}

static __meminit void zone_pcp_init(struct zone *zone)
{
	/*
	 * The per cpu subsystem is not up at this point. The following code
	 * relies on the ability of the linker to provide the
	 * offset of a (static) per cpu variable into the per cpu area.
	 */
	zone->pageset = &boot_pageset;

	if (populated_zone(zone))
		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
			zone->name, zone->present_pages,
					 zone_batchsize(zone));
}

void __meminit init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size)
{
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat->nr_zones = zone_idx(zone) + 1;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);
	zone->initialized = 1;
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
#ifndef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != -1) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}
#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */

/**
 * free_bootmem_with_active_regions - Call memblock_free_early_nid for each active range
 * @nid: The node to free memory on. If MAX_NUMNODES, all nodes are freed.
 * @max_low_pfn: The highest PFN that will be passed to memblock_free_early_nid
 *
 * If an architecture guarantees that all ranges registered contain no holes
 * and may be freed, this function may be used instead of calling
 * memblock_free_early_nid() manually.
 */
void __init free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, this_nid;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid) {
		start_pfn = min(start_pfn, max_low_pfn);
		end_pfn = min(end_pfn, max_low_pfn);

		if (start_pfn < end_pfn)
			memblock_free_early_nid(PFN_PHYS(start_pfn),
					(end_pfn - start_pfn) << PAGE_SHIFT,
					this_nid);
	}
}

/**
 * sparse_memory_present_with_active_regions - Call memory_present for each active range
 * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
 *
 * If an architecture guarantees that all ranges registered contain no holes and may
 * be freed, this function may be used instead of calling memory_present() manually.
 */
void __init sparse_memory_present_with_active_regions(int nid)
{
	unsigned long start_pfn, end_pfn;
	int i, this_nid;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, &this_nid)
		memory_present(this_nid, start_pfn, end_pfn);
}

/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by memblock_set_node(). If called for a node
 * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
 */
void __meminit get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	unsigned long this_start_pfn, this_end_pfn;
	int i;

	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
		*start_pfn = min(*start_pfn, this_start_pfn);
		*end_pfn = max(*end_pfn, this_end_pfn);
	}

	if (*start_pfn == -1UL)
		*start_pfn = 0;
}
/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonically
 * increasing memory addresses, so that the "highest" populated zone is used.
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;
	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}

/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independently of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory
 * addresses.
 */
static void __meminit adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (!mirrored_kernelcore &&
			*zone_start_pfn < zone_movable_pfn[nid] &&
			*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}
  5094. /*
  5095. * Return the number of pages a zone spans in a node, including holes
  5096. * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
  5097. */
  5098. static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  5099. unsigned long zone_type,
  5100. unsigned long node_start_pfn,
  5101. unsigned long node_end_pfn,
  5102. unsigned long *zone_start_pfn,
  5103. unsigned long *zone_end_pfn,
  5104. unsigned long *ignored)
  5105. {
  5106. /* When hotadd a new node from cpu_up(), the node should be empty */
  5107. if (!node_start_pfn && !node_end_pfn)
  5108. return 0;
  5109. /* Get the start and end of the zone */
  5110. *zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
  5111. *zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
  5112. adjust_zone_range_for_zone_movable(nid, zone_type,
  5113. node_start_pfn, node_end_pfn,
  5114. zone_start_pfn, zone_end_pfn);
  5115. /* Check that this node has pages within the zone's required range */
  5116. if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
  5117. return 0;
  5118. /* Move the zone boundaries inside the node if necessary */
  5119. *zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
  5120. *zone_start_pfn = max(*zone_start_pfn, node_start_pfn);
  5121. /* Return the spanned pages */
  5122. return *zone_end_pfn - *zone_start_pfn;
  5123. }
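
/*
 * Worked example (hypothetical values, for illustration): if ZONE_NORMAL
 * architecturally spans PFNs [0x100000, 0x400000) and the node covers
 * [0x200000, 0x300000), the zone boundaries are clipped to the node and
 * the zone spans 0x300000 - 0x200000 = 0x100000 pages on that node.
 */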
/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __meminit __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}
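
/*
 * Worked example (hypothetical values): for a request of [1000, 2000)
 * where memblock reports memory at [0, 1200) and [1500, 3000), the two
 * ranges clamp to [1000, 1200) and [1500, 2000), so
 * nr_absent = 1000 - 200 - 500 = 300 pages of holes.
 */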
/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * It returns the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
static unsigned long __meminit zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *ignored)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
	unsigned long zone_start_pfn, zone_end_pfn;
	unsigned long nr_absent;

	/* When hotadding a new node from cpu_up(), the node should be empty */
	if (!node_start_pfn && !node_end_pfn)
		return 0;

	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);

	adjust_zone_range_for_zone_movable(nid, zone_type,
			node_start_pfn, node_end_pfn,
			&zone_start_pfn, &zone_end_pfn);
	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);

	/*
	 * ZONE_MOVABLE handling.
	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
	 * and vice versa.
	 */
	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
		unsigned long start_pfn, end_pfn;
		struct memblock_region *r;

		for_each_memblock(memory, r) {
			start_pfn = clamp(memblock_region_memory_base_pfn(r),
					  zone_start_pfn, zone_end_pfn);
			end_pfn = clamp(memblock_region_memory_end_pfn(r),
					zone_start_pfn, zone_end_pfn);

			if (zone_type == ZONE_MOVABLE &&
			    memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;

			if (zone_type == ZONE_NORMAL &&
			    !memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;
		}
	}

	return nr_absent;
}

#else /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
static inline unsigned long __meminit zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn,
					unsigned long *zones_size)
{
	unsigned int zone;

	*zone_start_pfn = node_start_pfn;
	for (zone = 0; zone < zone_type; zone++)
		*zone_start_pfn += zones_size[zone];

	*zone_end_pfn = *zone_start_pfn + zones_size[zone_type];

	return zones_size[zone_type];
}

static inline unsigned long __meminit zone_absent_pages_in_node(int nid,
						unsigned long zone_type,
						unsigned long node_start_pfn,
						unsigned long node_end_pfn,
						unsigned long *zholes_size)
{
	if (!zholes_size)
		return 0;

	return zholes_size[zone_type];
}

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static void __meminit calculate_node_totalpages(struct pglist_data *pgdat,
						unsigned long node_start_pfn,
						unsigned long node_end_pfn,
						unsigned long *zones_size,
						unsigned long *zholes_size)
{
	unsigned long realtotalpages = 0, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;
		unsigned long zone_start_pfn, zone_end_pfn;
		unsigned long size, real_size;

		size = zone_spanned_pages_in_node(pgdat->node_id, i,
						  node_start_pfn,
						  node_end_pfn,
						  &zone_start_pfn,
						  &zone_end_pfn,
						  zones_size);
		real_size = size - zone_absent_pages_in_node(pgdat->node_id, i,
						  node_start_pfn, node_end_pfn,
						  zholes_size);
		if (size)
			zone->zone_start_pfn = zone_start_pfn;
		else
			zone->zone_start_pfn = 0;
		zone->spanned_pages = size;
		zone->present_pages = real_size;

		totalpages += size;
		realtotalpages += real_size;
	}

	pgdat->node_spanned_pages = totalpages;
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
							realtotalpages);
}

#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long
 * Start by making sure zonesize is a multiple of pageblock_order by rounding
 * up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock, finally
 * round what is now in bits to the nearest long in bits, then return it in
 * bytes.
 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));

	return usemapsize / 8;
}
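
/*
 * Worked example (hypothetical values): with pageblock_order = 9
 * (pageblock_nr_pages = 512), NR_PAGEBLOCK_BITS = 4 and 64-bit longs,
 * a zone of 1000 pages starting on a pageblock boundary rounds up to
 * 1024 pages = 2 pageblocks = 8 bits, which rounds up to a single
 * 64-bit long, so usemap_size() returns 8 bytes.
 */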
static void __init setup_usemap(struct pglist_data *pgdat,
				struct zone *zone,
				unsigned long zone_start_pfn,
				unsigned long zonesize)
{
	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);

	zone->pageblock_flags = NULL;
	if (usemapsize)
		zone->pageblock_flags =
			memblock_virt_alloc_node_nopanic(usemapsize,
							 pgdat->node_id);
}
#else
static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
				unsigned long zone_start_pfn, unsigned long zonesize) {}
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __paginginit set_pageblock_order(void)
{
	unsigned int order;

	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	if (HPAGE_SHIFT > PAGE_SHIFT)
		order = HUGETLB_PAGE_ORDER;
	else
		order = MAX_ORDER - 1;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64 and
	 * powerpc.
	 */
	pageblock_order = order;
}
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * is unused as pageblock_order is set at compile-time. See
 * include/linux/pageblock-flags.h for the values of pageblock_order based on
 * the kernel config
 */
void __paginginit set_pageblock_order(void)
{
}

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

static unsigned long __paginginit calc_memmap_size(unsigned long spanned_pages,
						   unsigned long present_pages)
{
	unsigned long pages = spanned_pages;

	/*
	 * Provide a more accurate estimation if there are holes within
	 * the zone and SPARSEMEM is in use. If there are holes within the
	 * zone, each populated memory region may cost us one or two extra
	 * memmap pages due to alignment because memmap pages for each
	 * populated region may not be naturally aligned on a page boundary.
	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
	 */
	if (spanned_pages > present_pages + (present_pages >> 4) &&
	    IS_ENABLED(CONFIG_SPARSEMEM))
		pages = present_pages;

	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}
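
/*
 * Worked example (hypothetical values): assuming 4KiB pages and a
 * 64-byte struct page, a zone spanning 1048576 pages (4GiB) needs
 * 1048576 * 64B = 64MiB of memmap, i.e. 16384 pages. If the zone is
 * sparse enough that spanned_pages exceeds present_pages by more than
 * present_pages/16 (and SPARSEMEM is enabled), present_pages is used
 * for the estimate instead.
 */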
/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 *
 * NOTE: pgdat should get zeroed by caller.
 */
static void __paginginit free_area_init_core(struct pglist_data *pgdat)
{
	enum zone_type j;
	int nid = pgdat->node_id;

	pgdat_resize_init(pgdat);
#ifdef CONFIG_NUMA_BALANCING
	spin_lock_init(&pgdat->numabalancing_migrate_lock);
	pgdat->numabalancing_migrate_nr_pages = 0;
	pgdat->numabalancing_migrate_next_window = jiffies;
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spin_lock_init(&pgdat->split_queue_lock);
	INIT_LIST_HEAD(&pgdat->split_queue);
	pgdat->split_queue_len = 0;
#endif
	init_waitqueue_head(&pgdat->kswapd_wait);
	init_waitqueue_head(&pgdat->pfmemalloc_wait);
#ifdef CONFIG_COMPACTION
	init_waitqueue_head(&pgdat->kcompactd_wait);
#endif
	pgdat_page_ext_init(pgdat);
	spin_lock_init(&pgdat->lru_lock);
	lruvec_init(node_lruvec(pgdat));

	pgdat->per_cpu_nodestats = &boot_nodestats;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, realsize, freesize, memmap_pages;
		unsigned long zone_start_pfn = zone->zone_start_pfn;

		size = zone->spanned_pages;
		realsize = freesize = zone->present_pages;

		/*
		 * Adjust freesize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
		memmap_pages = calc_memmap_size(size, realsize);
		if (!is_highmem_idx(j)) {
			if (freesize >= memmap_pages) {
				freesize -= memmap_pages;
				if (memmap_pages)
					printk(KERN_DEBUG
					       " %s zone: %lu pages used for memmap\n",
					       zone_names[j], memmap_pages);
			} else
				pr_warn(" %s zone: %lu pages exceeds freesize %lu\n",
					zone_names[j], memmap_pages, freesize);
		}

		/* Account for reserved pages */
		if (j == 0 && freesize > dma_reserve) {
			freesize -= dma_reserve;
			printk(KERN_DEBUG " %s zone: %lu pages reserved\n",
					zone_names[0], dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += freesize;
		/* Charge for highmem memmap if there are enough kernel pages */
		else if (nr_kernel_pages > memmap_pages * 2)
			nr_kernel_pages -= memmap_pages;
		nr_all_pages += freesize;

		/*
		 * Set an approximate value for lowmem here, it will be adjusted
		 * when the bootmem allocator frees pages into the buddy system.
		 * And all highmem pages will be managed by the buddy system.
		 */
		zone->managed_pages = is_highmem_idx(j) ? realsize : freesize;
#ifdef CONFIG_NUMA
		zone->node = nid;
#endif
		zone->name = zone_names[j];
		zone->zone_pgdat = pgdat;
		spin_lock_init(&zone->lock);
		zone_seqlock_init(zone);
		zone_pcp_init(zone);

		if (!size)
			continue;

		set_pageblock_order();
		setup_usemap(pgdat, zone, zone_start_pfn, size);
		init_currently_empty_zone(zone, zone_start_pfn, size);
		memmap_init(size, nid, j, zone_start_pfn);
	}
}
#ifdef CONFIG_FLAT_NODE_MEM_MAP
static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
{
	unsigned long __maybe_unused start = 0;
	unsigned long __maybe_unused offset = 0;

	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	offset = pgdat->node_start_pfn - start;
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
		end = pgdat_end_pfn(pgdat);
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size = (end - start) * sizeof(struct page);
		map = memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
		pgdat->node_mem_map = map + offset;
	}
	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
				__func__, pgdat->node_id, (unsigned long)pgdat,
				(unsigned long)pgdat->node_mem_map);
#ifndef CONFIG_NEED_MULTIPLE_NODES
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
#if defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) || defined(CONFIG_FLATMEM)
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= offset;
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
	}
#endif
}
#else
static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
#endif /* CONFIG_FLAT_NODE_MEM_MAP */
void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
				      unsigned long node_start_pfn,
				      unsigned long *zholes_size)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = 0;
	unsigned long end_pfn = 0;

	/* pg_data_t should be reset to zero when it's allocated */
	WARN_ON(pgdat->nr_zones || pgdat->kswapd_classzone_idx);

	pgdat->node_id = nid;
	pgdat->node_start_pfn = node_start_pfn;
	pgdat->per_cpu_nodestats = NULL;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
		(u64)start_pfn << PAGE_SHIFT,
		end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
#else
	start_pfn = node_start_pfn;
#endif
	calculate_node_totalpages(pgdat, start_pfn, end_pfn,
				  zones_size, zholes_size);

	alloc_node_mem_map(pgdat);

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	pgdat->static_init_pgcnt = min_t(unsigned long, PAGES_PER_SECTION,
					 pgdat->node_spanned_pages);
	pgdat->first_deferred_pfn = ULONG_MAX;
#endif
	free_area_init_core(pgdat);
}
#ifdef CONFIG_HAVE_MEMBLOCK
/*
 * Only struct pages that are backed by physical memory are zeroed and
 * initialized by going through __init_single_page(). But, there are some
 * struct pages which are reserved in memblock allocator and their fields
 * may be accessed (for example page_to_pfn() on some configuration accesses
 * flags). We must explicitly zero those struct pages.
 */
void __paginginit zero_resv_unavail(void)
{
	phys_addr_t start, end;
	unsigned long pfn;
	u64 i, pgcnt;

	/*
	 * Loop through ranges that are reserved, but do not have reported
	 * physical memory backing.
	 */
	pgcnt = 0;
	for_each_resv_unavail_range(i, &start, &end) {
		for (pfn = PFN_DOWN(start); pfn < PFN_UP(end); pfn++) {
			if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
				continue;
			mm_zero_struct_page(pfn_to_page(pfn));
			pgcnt++;
		}
	}

	/*
	 * Struct pages that do not have backing memory. This could be because
	 * firmware is using some of this memory, or for some other reasons.
	 * Once memblock is changed so that such behaviour is not allowed,
	 * i.e. the list of "reserved" memory must be a subset of the list of
	 * "memory", this code can be removed.
	 */
	if (pgcnt)
		pr_info("Reserved but unavailable: %lld pages\n", pgcnt);
}
#endif /* CONFIG_HAVE_MEMBLOCK */
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP

#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
void __init setup_nr_node_ids(void)
{
	unsigned int highest;

	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
	nr_node_ids = highest + 1;
}
#endif

/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * This function should be called after node map is populated and sorted.
 * It calculates the maximum power of two alignment which can distinguish
 * all the nodes.
 *
 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)). If the
 * nodes are shifted by 256MiB, 256MiB is returned. Note that if only the
 * last node is shifted, 1GiB is enough and this function will indicate so.
 *
 * This is used to test whether pfn -> nid mapping of the chosen memory
 * model has fine enough granularity to avoid incorrect mapping for the
 * populated node map.
 *
 * Returns the determined alignment in pfn's. 0 if there is no alignment
 * requirement (single node).
 */
unsigned long __init node_map_pfn_alignment(void)
{
	unsigned long accl_mask = 0, last_end = 0;
	unsigned long start, end, mask;
	int last_nid = -1;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
		if (!start || last_nid < 0 || last_nid == nid) {
			last_nid = nid;
			last_end = end;
			continue;
		}

		/*
		 * Start with a mask granular enough to pin-point to the
		 * start pfn and tick off bits one-by-one until it becomes
		 * too coarse to separate the current node from the last.
		 */
		mask = ~((1 << __ffs(start)) - 1);
		while (mask && last_end <= (start & (mask << 1)))
			mask <<= 1;

		/* accumulate all internode masks */
		accl_mask |= mask;
	}

	/* convert mask to number of pages */
	return ~accl_mask + 1;
}
/* Find the lowest pfn for a node */
static unsigned long __init find_min_pfn_for_node(int nid)
{
	unsigned long min_pfn = ULONG_MAX;
	unsigned long start_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, NULL, NULL)
		min_pfn = min(min_pfn, start_pfn);

	if (min_pfn == ULONG_MAX) {
		pr_warn("Could not find start_pfn for node %d\n", nid);
		return 0;
	}

	return min_pfn;
}

/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * It returns the minimum PFN based on information provided via
 * memblock_set_node().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
{
	return find_min_pfn_for_node(MAX_NUMNODES);
}

/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);
	}
	return totalpages;
}

/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrowing the nodemask */
	nodemask_t saved_node_state = node_states[N_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
	struct memblock_region *r;

	/* Need to find movable_zone earlier when movable_node is specified. */
	find_usable_zone_for_movable();

	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
	if (movable_node_is_enabled()) {
		for_each_memblock(memory, r) {
			if (!memblock_is_hotpluggable(r))
				continue;

			nid = r->nid;

			usable_startpfn = PFN_DOWN(r->base);
			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		goto out2;
	}

	/*
	 * If kernelcore=mirror is specified, ignore movablecore option
	 */
	if (mirrored_kernelcore) {
		bool mem_below_4gb_not_mirrored = false;

		for_each_memblock(memory, r) {
			if (memblock_is_mirror(r))
				continue;

			nid = r->nid;

			usable_startpfn = memblock_region_memory_base_pfn(r);

			if (usable_startpfn < 0x100000) {
				mem_below_4gb_not_mirrored = true;
				continue;
			}

			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		if (mem_below_4gb_not_mirrored)
			pr_warn("This configuration results in unmirrored kernel memory.\n");

		goto out2;
	}

	/*
	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
	 * amount of necessary memory.
	 */
	if (required_kernelcore_percent)
		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
				       10000UL;
	if (required_movablecore_percent)
		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
					10000UL;

	/*
	 * If movablecore= was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		required_movablecore = min(totalpages, required_movablecore);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/*
	 * If kernelcore was not specified or kernelcore size is larger
	 * than totalpages, there is no ZONE_MOVABLE.
	 */
	if (!required_kernelcore || required_kernelcore >= totalpages)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			unsigned long size_pages;

			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;

				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

out2:
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

out:
	/* restore the node_state */
	node_states[N_MEMORY] = saved_node_state;
}
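
/*
 * Worked example (hypothetical layout: two nodes of 1048576 pages each,
 * 4KiB pages, no holes, all memory in the highest usable zone):
 * kernelcore=4G parses to required_kernelcore = 1048576 pages, so with
 * usable_nodes = 2 each node gets kernelcore_node = 524288 pages.
 * zone_movable_pfn[] then lands half-way through each node and the
 * upper half of each node becomes ZONE_MOVABLE.
 */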
/* Any regular or high memory on that node? */
static void check_for_memory(pg_data_t *pgdat, int nid)
{
	enum zone_type zone_type;

	if (N_MEMORY == N_NORMAL_MEMORY)
		return;

	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		if (populated_zone(zone)) {
			node_set_state(nid, N_HIGH_MEMORY);
			if (N_NORMAL_MEMORY != N_HIGH_MEMORY &&
			    zone_type <= ZONE_NORMAL)
				node_set_state(nid, N_NORMAL_MEMORY);
			break;
		}
	}
}

/**
 * free_area_init_nodes - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFNs of
 * two adjacent zones match, it is assumed that the higher zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init_nodes(unsigned long *max_zone_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, nid;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));

	start_pfn = find_min_pfn_with_active_regions();

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;

		end_pfn = max(max_zone_pfn[i], start_pfn);
		arch_zone_lowest_possible_pfn[i] = start_pfn;
		arch_zone_highest_possible_pfn[i] = end_pfn;

		start_pfn = end_pfn;
	}

	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
	find_zone_movable_pfns_for_nodes();

	/* Print out the zone ranges */
	pr_info("Zone ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		pr_info(" %-8s ", zone_names[i]);
		if (arch_zone_lowest_possible_pfn[i] ==
				arch_zone_highest_possible_pfn[i])
			pr_cont("empty\n");
		else
			pr_cont("[mem %#018Lx-%#018Lx]\n",
				(u64)arch_zone_lowest_possible_pfn[i]
					<< PAGE_SHIFT,
				((u64)arch_zone_highest_possible_pfn[i]
					<< PAGE_SHIFT) - 1);
	}

	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
	pr_info("Movable zone start for each node\n");
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (zone_movable_pfn[i])
			pr_info(" Node %d: %#018Lx\n", i,
				(u64)zone_movable_pfn[i] << PAGE_SHIFT);
	}

	/* Print out the early node map */
	pr_info("Early memory node ranges\n");
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
		pr_info(" node %3d: [mem %#018Lx-%#018Lx]\n", nid,
			(u64)start_pfn << PAGE_SHIFT,
			((u64)end_pfn << PAGE_SHIFT) - 1);

	/* Initialise every node */
	mminit_verify_pageflags_layout();
	setup_nr_node_ids();
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);

		free_area_init_node(nid, NULL,
				find_min_pfn_for_node(nid), NULL);

		/* Any memory on that node */
		if (pgdat->node_present_pages)
			node_set_state(nid, N_MEMORY);
		check_for_memory(pgdat, nid);
	}
	zero_resv_unavail();
}

static int __init cmdline_parse_core(char *p, unsigned long *core,
				     unsigned long *percent)
{
	unsigned long long coremem;
	char *endptr;

	if (!p)
		return -EINVAL;

	/* Value may be a percentage of total memory, otherwise bytes */
	coremem = simple_strtoull(p, &endptr, 0);
	if (*endptr == '%') {
		/* Paranoid check for percent values greater than 100 */
		WARN_ON(coremem > 100);

		*percent = coremem;
	} else {
		coremem = memparse(p, &p);
		/* Paranoid check that UL is enough for the coremem value */
		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

		*core = coremem >> PAGE_SHIFT;
		*percent = 0UL;
	}
	return 0;
}
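
/*
 * Worked example (hypothetical command lines): "kernelcore=512M" goes
 * through memparse() and, with 4KiB pages, yields *core = 131072 pages
 * and *percent = 0; "kernelcore=50%" takes the '%' branch and yields
 * *percent = 50 with *core left untouched.
 */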
/*
 * kernelcore=size sets the amount of memory for use for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	/* parse kernelcore=mirror */
	if (parse_option_str(p, "mirror")) {
		mirrored_kernelcore = true;
		return 0;
	}

	return cmdline_parse_core(p, &required_kernelcore,
				  &required_kernelcore_percent);
}

/*
 * movablecore=size sets the amount of memory for use for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore,
				  &required_movablecore_percent);
}

early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);

#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

void adjust_managed_page_count(struct page *page, long count)
{
	spin_lock(&managed_page_count_lock);
	page_zone(page)->managed_pages += count;
	totalram_pages += count;
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages += count;
#endif
	spin_unlock(&managed_page_count_lock);
}
EXPORT_SYMBOL(adjust_managed_page_count);

unsigned long free_reserved_area(void *start, void *end, int poison, char *s)
{
	void *pos;
	unsigned long pages = 0;

	start = (void *)PAGE_ALIGN((unsigned long)start);
	end = (void *)((unsigned long)end & PAGE_MASK);
	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
		if ((unsigned int)poison <= 0xFF)
			memset(pos, poison, PAGE_SIZE);
		free_reserved_page(virt_to_page(pos));
	}

	if (pages && s)
		pr_info("Freeing %s memory: %ldK\n",
			s, pages << (PAGE_SHIFT - 10));

	return pages;
}
EXPORT_SYMBOL(free_reserved_area);
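
/*
 * A typical use (cf. free_initmem_default() in include/linux/mm.h) is
 * releasing the .init sections once boot is done:
 *
 *	free_reserved_area(&__init_begin, &__init_end,
 *			   POISON_FREE_INITMEM, "unused kernel");
 *
 * which poisons, unreserves and frees each page in the range and logs
 * "Freeing unused kernel memory: ...K".
 */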
#ifdef CONFIG_HIGHMEM
void free_highmem_page(struct page *page)
{
	__free_reserved_page(page);
	totalram_pages++;
	page_zone(page)->managed_pages++;
	totalhigh_pages++;
}
#endif

void __init mem_init_print_info(const char *str)
{
	unsigned long physpages, codesize, datasize, rosize, bss_size;
	unsigned long init_code_size, init_data_size;

	physpages = get_num_physpages();
	codesize = _etext - _stext;
	datasize = _edata - _sdata;
	rosize = __end_rodata - __start_rodata;
	bss_size = __bss_stop - __bss_start;
	init_data_size = __init_end - __init_begin;
	init_code_size = _einittext - _sinittext;

	/*
	 * Detect special cases and adjust section sizes accordingly:
	 * 1) .init.* may be embedded into .data sections
	 * 2) .init.text.* may be out of [__init_begin, __init_end],
	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
	 * 3) .rodata.* may be embedded into .text or .data sections.
	 */
#define adj_init_size(start, end, size, pos, adj) \
	do { \
		if (start <= pos && pos < end && size > adj) \
			size -= adj; \
	} while (0)

	adj_init_size(__init_begin, __init_end, init_data_size,
		      _sinittext, init_code_size);
	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);

#undef adj_init_size

	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
#ifdef CONFIG_HIGHMEM
		", %luK highmem"
#endif
		"%s%s)\n",
		nr_free_pages() << (PAGE_SHIFT - 10),
		physpages << (PAGE_SHIFT - 10),
		codesize >> 10, datasize >> 10, rosize >> 10,
		(init_data_size + init_code_size) >> 10, bss_size >> 10,
		(physpages - totalram_pages - totalcma_pages) << (PAGE_SHIFT - 10),
		totalcma_pages << (PAGE_SHIFT - 10),
#ifdef CONFIG_HIGHMEM
		totalhigh_pages << (PAGE_SHIFT - 10),
#endif
		str ? ", " : "", str ? str : "");
}

/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}

void __init free_area_init(unsigned long *zones_size)
{
	free_area_init_node(0, zones_size,
			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
	zero_resv_unavail();
}

static int page_alloc_cpu_dead(unsigned int cpu)
{
	lru_add_drain_cpu(cpu);
	drain_pages(cpu);

	/*
	 * Spill the event counters of the dead processor
	 * into the current processors event counters.
	 * This artificially elevates the count of the current
	 * processor.
	 */
	vm_events_fold_cpu(cpu);

	/*
	 * Zero the differential counters of the dead processor
	 * so that the vm statistics are consistent.
	 *
	 * This is only okay since the processor is dead and cannot
	 * race with what we are doing.
	 */
	cpu_vm_stats_fold(cpu);
	return 0;
}

void __init page_alloc_init(void)
{
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
					"mm/page_alloc:dead", NULL,
					page_alloc_cpu_dead);
	WARN_ON(ret < 0);
}

/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {

		pgdat->totalreserve_pages = 0;

		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			long max = 0;

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > zone->managed_pages)
				max = zone->managed_pages;

			pgdat->totalreserve_pages += max;

			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
}

/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes. Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type j, idx;

	for_each_online_pgdat(pgdat) {
		for (j = 0; j < MAX_NR_ZONES; j++) {
			struct zone *zone = pgdat->node_zones + j;
			unsigned long managed_pages = zone->managed_pages;

			zone->lowmem_reserve[j] = 0;

			idx = j;
			while (idx) {
				struct zone *lower_zone;

				idx--;
				lower_zone = pgdat->node_zones + idx;

				if (sysctl_lowmem_reserve_ratio[idx] < 1) {
					sysctl_lowmem_reserve_ratio[idx] = 0;
					lower_zone->lowmem_reserve[j] = 0;
				} else {
					lower_zone->lowmem_reserve[j] =
						managed_pages / sysctl_lowmem_reserve_ratio[idx];
				}
				managed_pages += lower_zone->managed_pages;
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
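
/*
 * Worked example (hypothetical values): with just ZONE_DMA and
 * ZONE_NORMAL, ZONE_NORMAL managing 1048576 pages and
 * sysctl_lowmem_reserve_ratio[ZONE_DMA] = 256, ZONE_DMA ends up with
 * lowmem_reserve[ZONE_NORMAL] = 1048576 / 256 = 4096 pages: an
 * allocation that could have used ZONE_NORMAL must leave that many
 * extra pages free in ZONE_DMA before falling back to it.
 */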
static void __setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone->managed_pages;
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone->managed_pages;
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
			unsigned long min_pages;

			min_pages = zone->managed_pages / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->watermark[WMARK_MIN] = tmp;
		}

		/*
		 * Set the kswapd watermarks distance according to the
		 * scale factor in proportion to available memory, but
		 * ensure a minimum size on small systems.
		 */
		tmp = max_t(u64, tmp >> 2,
			    mult_frac(zone->managed_pages,
				      watermark_scale_factor, 10000));

		zone->watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
		zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
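
/*
 * Worked example (hypothetical values): with min_free_kbytes = 4096 and
 * 4KiB pages, pages_min = 4096 >> 2 = 1024. A lowmem zone holding half
 * of all lowmem gets WMARK_MIN = 512. With watermark_scale_factor = 10
 * the kswapd delta is max(512 >> 2, managed_pages * 10 / 10000); if the
 * first term wins, WMARK_LOW = 512 + 128 = 640 and
 * WMARK_HIGH = 512 + 256 = 768 pages.
 */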
/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);
	__setup_per_zone_wmarks();
	spin_unlock(&lock);
}

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min). For large machines
 * we want it large (64MB max). But it is not linear, because network
 * bandwidth does not increase linearly with machine size. We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
int __meminit init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes) {
		min_free_kbytes = new_min_free_kbytes;
		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 65536)
			min_free_kbytes = 65536;
	} else {
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
				new_min_free_kbytes, user_min_free_kbytes);
	}
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();

#ifdef CONFIG_NUMA
	setup_min_unmapped_ratio();
	setup_min_slab_ratio();
#endif

	return 0;
}
core_initcall(init_per_zone_wmark_min)
/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}

int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write)
		setup_per_zone_wmarks();

	return 0;
}

#ifdef CONFIG_NUMA
static void setup_min_unmapped_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_unmapped_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_unmapped_pages += (zone->managed_pages *
				sysctl_min_unmapped_ratio) / 100;
}

int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_unmapped_ratio();

	return 0;
}

static void setup_min_slab_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_slab_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_slab_pages += (zone->managed_pages *
				sysctl_min_slab_ratio) / 100;
}

int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_slab_ratio();

	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks. The lowmem reserve ratio only makes sense as a
 * function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);
	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu. It is the fraction of total pages in each zone that a hot per-cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
 */
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int old_percpu_pagelist_fraction;
	int ret;

	mutex_lock(&pcp_batch_high_lock);
	old_percpu_pagelist_fraction = percpu_pagelist_fraction;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || ret < 0)
		goto out;

	/* Sanity checking to avoid pcp imbalance */
	if (percpu_pagelist_fraction &&
	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
		ret = -EINVAL;
		goto out;
	}

	/* No change? */
	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
		goto out;

	for_each_populated_zone(zone) {
		unsigned int cpu;

		for_each_possible_cpu(cpu)
			pageset_set_high_and_batch(zone,
					per_cpu_ptr(zone->pageset, cpu));
	}
out:
	mutex_unlock(&pcp_batch_high_lock);
	return ret;
}

#ifdef CONFIG_NUMA
int hashdist = HASHDIST_DEFAULT;

static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif

#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
/*
 * Returns the number of pages that arch has reserved but
 * is not known to alloc_large_system_hash().
 */
static unsigned long __init arch_reserved_kernel_pages(void)
{
	return 0;
}
#endif

/*
 * Adaptive scale is meant to reduce sizes of hash tables on large memory
 * machines. As memory size is increased the scale is also increased but at
 * a slower pace. Starting from ADAPT_SCALE_BASE (64G), every time memory
 * quadruples the scale is increased by one, which means the size of the hash
 * table only doubles, instead of quadrupling as well.
 * Because 32-bit systems cannot have large physical memory, where this scaling
 * makes sense, it is disabled on such platforms.
 */
#if __BITS_PER_LONG > 32
#define ADAPT_SCALE_BASE	(64ul << 30)
#define ADAPT_SCALE_SHIFT	2
#define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
#endif
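
/*
 * Worked example (hypothetical values): with 4KiB pages,
 * ADAPT_SCALE_NPAGES is 16M pages (64GiB). On a 256GiB machine,
 * numentries starts around 64M pages, so the loop below bumps scale
 * once (16M < 64M, then 64M is not < 64M); each further quadrupling of
 * memory adds one more to scale, i.e. the table doubles rather than
 * quadruples.
 */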
/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long long max = high_limit;
	unsigned long log2qty, size;
	void *table = NULL;
	gfp_t gfp_flags;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries -= arch_reserved_kernel_pages();

		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SHIFT < 20)
			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);

#if __BITS_PER_LONG > 32
		if (!high_limit) {
			unsigned long adapt;

			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
			     adapt <<= ADAPT_SCALE_SHIFT)
				scale++;
		}
#endif

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
	do {
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY) {
			if (flags & HASH_ZERO)
				table = memblock_virt_alloc_nopanic(size, 0);
			else
				table = memblock_virt_alloc_raw(size, 0);
		} else if (hashdist) {
			table = __vmalloc(size, gfp_flags, PAGE_KERNEL);
		} else {
			/*
			 * If bucketsize is not a power-of-two, we may free
			 * some pages at the end of the hash table, which
			 * alloc_pages_exact() automatically does.
			 */
			if (get_order(size) < MAX_ORDER) {
				table = alloc_pages_exact(size, gfp_flags);
				kmemleak_alloc(table, size, 1, gfp_flags);
			}
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

	pr_info("%s hash table entries: %ld (order: %d, %lu bytes)\n",
		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size);

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
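
/*
 * A representative caller (cf. dcache_init() in fs/dcache.c) sizes the
 * dentry hash table with one bucket per 2^13 bytes of lowmem:
 *
 *	dentry_hashtable = alloc_large_system_hash("Dentry cache",
 *					sizeof(struct hlist_bl_head),
 *					dhash_entries, 13, HASH_ZERO,
 *					&d_hash_shift, NULL, 0, 0);
 */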
  6530. /*
  6531. * This function checks whether pageblock includes unmovable pages or not.
  6532. * If @count is not zero, it is okay to include less @count unmovable pages
  6533. *
  6534. * PageLRU check without isolation or lru_lock could race so that
  6535. * MIGRATE_MOVABLE block might include unmovable pages. And __PageMovable
  6536. * check without lock_page also may miss some movable non-lru pages at
  6537. * race condition. So you can't expect this function should be exact.
  6538. */
bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
			 int migratetype,
			 bool skip_hwpoisoned_pages)
{
	unsigned long pfn, iter, found;

	/*
	 * TODO: we could make this much more efficient by not checking every
	 * page in the range if we knew that all of them are in MOVABLE_ZONE
	 * and that the movable zone guarantees that pages are migratable,
	 * but the latter is not the case right now, unfortunately. E.g.
	 * movablecore can still lead to bootmem allocations in zone_movable.
	 */

	/*
	 * CMA allocations (alloc_contig_range) really need to mark CMA
	 * pageblocks as isolated even when they are not in fact movable,
	 * so consider them movable here.
	 */
	if (is_migrate_cma(migratetype) &&
			is_migrate_cma(get_pageblock_migratetype(page)))
		return false;

	pfn = page_to_pfn(page);
	for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
		unsigned long check = pfn + iter;

		if (!pfn_valid_within(check))
			continue;

		page = pfn_to_page(check);

		if (PageReserved(page))
			goto unmovable;

		/*
		 * Hugepages are not on LRU lists, but they're movable.
		 * We need not scan over the tail pages because we don't
		 * handle each tail page individually in migration.
		 */
		if (PageHuge(page)) {
			iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
			continue;
		}
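		/*
		 * Worked example for the PageHuge skip above: for a 2MB
		 * hugepage head (compound_order == 9, 512 pages) found at
		 * iter == 0, round_up(1, 512) - 1 == 511, so the loop's
		 * iter++ resumes the scan at iter == 512, past all of the
		 * tail pages.
		 */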
		/*
		 * We can't use page_count without pinning the page, because
		 * another CPU can free the compound page. This check already
		 * skips compound tails of THP because their page->_refcount
		 * is zero at all times.
		 */
		if (!page_ref_count(page)) {
			if (PageBuddy(page))
				iter += (1 << page_order(page)) - 1;
			continue;
		}

		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * its page_count() is not 0.
		 */
		if (skip_hwpoisoned_pages && PageHWPoison(page))
			continue;

		if (__PageMovable(page))
			continue;

		if (!PageLRU(page))
			found++;
		/*
		 * If there are RECLAIMABLE pages, we need to check them.
		 * But for now memory offline itself doesn't call
		 * shrink_node_slabs(), and this still needs to be fixed.
		 */
		/*
		 * If the page is not RAM, page_count() should be 0.
		 * We don't need further checks: this is a _used_,
		 * non-movable page.
		 *
		 * The problematic thing here is PG_reserved pages. PG_reserved
		 * is set on both memory hole pages and _used_ kernel pages
		 * at boot.
		 */
		if (found > count)
			goto unmovable;
	}
	return false;
unmovable:
	WARN_ON_ONCE(zone_idx(zone) == ZONE_MOVABLE);
	return true;
}
bool is_pageblock_removable_nolock(struct page *page)
{
	struct zone *zone;
	unsigned long pfn;

	/*
	 * We have to be careful here because we are iterating over memory
	 * sections which are not zone aware, so we might end up outside of
	 * the zone but still within the section.
	 * We also have to take the node into account: if the node is offline,
	 * its NODE_DATA will be NULL - see page_zone.
	 */
	if (!node_online(page_to_nid(page)))
		return false;

	zone = page_zone(page);
	pfn = page_to_pfn(page);
	if (!zone_spans_pfn(zone, pfn))
		return false;

	return !has_unmovable_pages(zone, page, 0, MIGRATE_MOVABLE, true);
}
#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)

/* Round @pfn down to the larger of MAX_ORDER_NR_PAGES and pageblock_nr_pages. */
static unsigned long pfn_max_align_down(unsigned long pfn)
{
	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
			     pageblock_nr_pages) - 1);
}

/* Round @pfn up to the larger of MAX_ORDER_NR_PAGES and pageblock_nr_pages. */
static unsigned long pfn_max_align_up(unsigned long pfn)
{
	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
				pageblock_nr_pages));
}
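/*
 * Worked example (assuming a typical x86-64 config: 4KB pages,
 * MAX_ORDER == 11 so MAX_ORDER_NR_PAGES == 1024, and
 * pageblock_nr_pages == 512): the alignment is max(1024, 512) == 1024
 * PFNs, so pfn_max_align_down(1500) == 1024 and
 * pfn_max_align_up(1500) == 2048.
 */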
/* [start, end) must belong to a single zone. */
static int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end)
{
	/* This function is based on compact_zone() from compaction.c. */
	unsigned long nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;

	migrate_prep();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			pfn = isolate_migratepages_range(cc, pfn, end);
			if (!pfn) {
				ret = -EINTR;
				break;
			}
			tries = 0;
		} else if (++tries == 5) {
			/* Give up after five failed passes over the same batch. */
			ret = ret < 0 ? ret : -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							&cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
				    NULL, 0, cc->mode, MR_CONTIG_RANGE);
	}
	if (ret < 0) {
		putback_movable_pages(&cc->migratepages);
		return ret;
	}
	return 0;
}
/**
 * alloc_contig_range() -- tries to allocate the given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 * @gfp_mask:	GFP mask to use during compaction
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned.  The PFN range must belong to a single zone.
 *
 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
 * pageblocks in the range.  Once isolated, the pageblocks should not
 * be modified by others.
 *
 * Returns zero on success or a negative error code.  On success all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype, gfp_t gfp_mask)
{
	unsigned long outer_start, outer_end;
	unsigned int order;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.no_set_skip_hint = true,
		.gfp_mask = current_gfp_context(gfp_mask),
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is mark all pageblocks in the range as
	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, we align the range to the larger of the two so that
	 * the page allocator won't try to merge buddies from different
	 * pageblocks and change MIGRATE_ISOLATE to some other migration
	 * type.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we migrate
	 * the pages from the unaligned range (i.e. the pages we are
	 * actually interested in).  This puts all the pages in the
	 * range back into the page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in the range from the
	 * page allocator, removing them from the buddy system.  This
	 * way the page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range, but not in the unaligned, original range, are
	 * put back into the page allocator so that the buddy system can
	 * use them.
	 */

	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype,
				       false);
	if (ret)
		return ret;

	/*
	 * In case of -EBUSY, we'd like to know which page causes the
	 * problem.  So, just fall through.  test_pages_isolated() has a
	 * tracepoint which will report the busy page.
	 *
	 * It is possible that busy pages could become available before
	 * the call to test_pages_isolated, and the range will actually be
	 * allocated.  So, if we fall through, be sure to clear ret so that
	 * -EBUSY is not accidentally used or returned to the caller.
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret && ret != -EBUSY)
		goto done;
	ret = 0;

	/*
	 * Pages from [start, end) are within MAX_ORDER_NR_PAGES aligned
	 * blocks that are marked as MIGRATE_ISOLATE.  What's more, all
	 * pages in [start, end) are free in the page allocator.  What
	 * we are going to do is allocate all pages from [start, end)
	 * (that is, remove them from the page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of the interesting range may not be aligned with the pages
	 * the page allocator holds, i.e. they can be part of higher
	 * order pages.  Because of this, we reserve the bigger range
	 * and once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated and thus won't be removed from the buddy system.
	 */

	lru_add_drain_all();
	drain_all_pages(cc.zone);

	/* Walk down to the buddy head that covers @start, if any. */
	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			outer_start = start;
			break;
		}
		outer_start &= ~0UL << order;
	}

	if (outer_start != start) {
		order = page_order(pfn_to_page(outer_start));

		/*
		 * The outer_start page could be a small order buddy page
		 * that doesn't include the start page.  Adjust outer_start
		 * in this case so the failed page is reported properly by
		 * the tracepoint in test_pages_isolated().
		 */
		if (outer_start + (1UL << order) <= start)
			outer_start = start;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, false)) {
		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
			__func__, outer_start, end);
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	return ret;
}
void free_contig_range(unsigned long pfn, unsigned nr_pages)
{
	unsigned int count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		/* The caller should hold the only reference to each page. */
		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%d pages are still in use!\n", count);
}
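/*
 * Illustrative sketch only (not a helper that exists in this file): how a
 * caller such as a CMA-style allocator might pair alloc_contig_range()
 * with free_contig_range().  The function name and PFN window are
 * assumptions made for the example.
 */
static int __maybe_unused example_claim_pfn_range(unsigned long start_pfn,
						  unsigned long nr_pages)
{
	int ret;

	/* Try to take [start_pfn, start_pfn + nr_pages) out of the buddy. */
	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				 MIGRATE_MOVABLE, GFP_KERNEL);
	if (ret)
		return ret;	/* e.g. -EBUSY or -EINTR; range untouched */

	/* ... the caller now owns every page in the range ... */

	/* Hand the pages back to the page allocator when done. */
	free_contig_range(start_pfn, nr_pages);
	return 0;
}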
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
	unsigned cpu;

	mutex_lock(&pcp_batch_high_lock);
	for_each_possible_cpu(cpu)
		pageset_set_high_and_batch(zone,
				per_cpu_ptr(zone->pageset, cpu));
	mutex_unlock(&pcp_batch_high_lock);
}
#endif
void zone_pcp_reset(struct zone *zone)
{
	unsigned long flags;
	int cpu;
	struct per_cpu_pageset *pset;

	/* avoid races with drain_pages() */
	local_irq_save(flags);
	if (zone->pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pset = per_cpu_ptr(zone->pageset, cpu);
			drain_zonestat(zone, pset);
		}
		free_percpu(zone->pageset);
		zone->pageset = &boot_pageset;
	}
	local_irq_restore(flags);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone and isolated
 * before calling this.
 */
void
__offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *page;
	struct zone *zone;
	unsigned int order, i;
	unsigned long pfn;
	unsigned long flags;

	/* find the first valid pfn */
	for (pfn = start_pfn; pfn < end_pfn; pfn++)
		if (pfn_valid(pfn))
			break;
	if (pfn == end_pfn)
		return;

	offline_mem_sections(pfn, end_pfn);
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	pfn = start_pfn;
	while (pfn < end_pfn) {
		if (!pfn_valid(pfn)) {
			pfn++;
			continue;
		}
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * its page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			SetPageReserved(page);
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = page_order(page);
#ifdef CONFIG_DEBUG_VM
		pr_info("remove from free list %lx %d %lx\n",
			pfn, 1 << order, end_pfn);
#endif
		list_del(&page->lru);
		rmv_page_order(page);
		zone->free_area[order].nr_free--;
		for (i = 0; i < (1 << order); i++)
			SetPageReserved((page+i));
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif
bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		/* Candidate buddy head: @pfn rounded down to this order. */
		struct page *page_head = page - (pfn & ((1 << order) - 1));
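		/*
		 * Worked example: for pfn == 0x12345 at order == 4,
		 * pfn & 15 == 5, so page_head points at pfn 0x12340, the
		 * order-4 aligned page that would be the buddy head here.
		 * The page is free iff some such head is PageBuddy with
		 * an order large enough to cover it.
		 */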
		if (PageBuddy(page_head) && page_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}
  6939. }