/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/page-isolation.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include "internal.h"

int hugepages_treat_as_movable;

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
	bool free = (spool->count == 0) && (spool->used_hpages == 0);

	spin_unlock(&spool->lock);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (free) {
		if (spool->min_hpages != -1)
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc(sizeof(*spool), GFP_KERNEL);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	spin_lock(&spool->lock);
	BUG_ON(!spool->count);
	spool->count--;
	unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return ret;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock(&spool->lock);
	return ret;
}
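
/*
 * Worked example of the accounting above (illustrative; assuming a subpool
 * created with max_hpages = 10 and min_hpages = 4, so rsv_hpages starts
 * at 4):
 *
 *	hugepage_subpool_get_pages(spool, 2);
 *		used_hpages 0 -> 2, rsv_hpages 4 -> 2, returns 0:
 *		the request is fully covered by the subpool's own reserves.
 *	hugepage_subpool_get_pages(spool, 5);
 *		used_hpages 2 -> 7, rsv_hpages 2 -> 0, returns 3:
 *		three pages must still be taken from the global pool.
 */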

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;

	if (!spool)
		return delta;

	spin_lock(&spool->lock);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool);

	return ret;
}
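
/*
 * Continuing the illustrative example above (min_hpages = 4,
 * used_hpages = 7, rsv_hpages = 0), freeing five pages:
 *
 *	hugepage_subpool_put_pages(spool, 5);
 *		used_hpages 7 -> 2, which is below min_hpages, so the
 *		subpool re-takes reserves: rsv_hpages 0 -> 4 (capped at
 *		min_hpages) and the function returns 1, the single global
 *		reservation the caller may drop.
 */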

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
	return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
	return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
	struct list_head link;
	long from;
	long to;
};

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  In the normal case, existing regions will be expanded
 * to accommodate the specified range.  Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range.  However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded.  In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map.  This
 * number is greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg, *trg;
	long add = 0;

	spin_lock(&resv->lock);
	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/*
	 * If no region exists which can be expanded to include the
	 * specified range, the list must have been modified by an
	 * interleaving call to region_del().  Pull a region descriptor
	 * from the cache and use it for this range.
	 */
	if (&rg->link == head || t < rg->from) {
		VM_BUG_ON(resv->region_cache_count <= 0);

		resv->region_cache_count--;
		nrg = list_first_entry(&resv->region_cache, struct file_region,
					link);
		list_del(&nrg->link);

		nrg->from = f;
		nrg->to = t;
		list_add(&nrg->link, rg->link.prev);

		add += t - f;
		goto out_locked;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely.  If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			/* Decrement return value by the deleted range.
			 * Another range will span this area so that by
			 * end of routine add will be >= zero
			 */
			add -= (rg->to - rg->from);
			list_del(&rg->link);
			kfree(rg);
		}
	}

	add += (nrg->from - f);		/* Added to beginning of region */
	nrg->from = f;
	add += t - nrg->to;		/* Added to end of region */
	nrg->to = t;

out_locked:
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
	VM_BUG_ON(add < 0);
	return add;
}

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  However, if the existing regions in the map can not
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder.  This is
 * so that the subsequent region_add call will have all the
 * regions it needs and will not fail.
 *
 * Upon entry, region_chg will also examine the cache of region descriptors
 * associated with the map.  If there are not enough descriptors cached, one
 * will be allocated for the in progress add operation.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *nrg = NULL;
	long chg = 0;

retry:
	spin_lock(&resv->lock);
retry_locked:
	resv->adds_in_progress++;

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations.
	 */
	if (resv->adds_in_progress > resv->region_cache_count) {
		struct file_region *trg;

		VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
		/* Must drop lock to allocate a new descriptor. */
		resv->adds_in_progress--;
		spin_unlock(&resv->lock);

		trg = kmalloc(sizeof(*trg), GFP_KERNEL);
		if (!trg) {
			kfree(nrg);
			return -ENOMEM;
		}

		spin_lock(&resv->lock);
		list_add(&trg->link, &resv->region_cache);
		resv->region_cache_count++;
		goto retry_locked;
	}

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		if (!nrg) {
			resv->adds_in_progress--;
			spin_unlock(&resv->lock);
			nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
			if (!nrg)
				return -ENOMEM;

			nrg->from = f;
			nrg->to = f;
			INIT_LIST_HEAD(&nrg->link);
			goto retry;
		}

		list_add(&nrg->link, rg->link.prev);
		chg = t - f;
		goto out_nrg;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			goto out;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves.  Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}

out:
	spin_unlock(&resv->lock);
	/* We already know we raced and no longer need the new region */
	kfree(nrg);
	return chg;
out_nrg:
	spin_unlock(&resv->lock);
	return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
	spin_lock(&resv->lock);
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress--;
	spin_unlock(&resv->lock);
}
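
/*
 * Typical usage of the three routines above, for illustration: region_chg()
 * is called first to count the missing pages (and to cache a descriptor for
 * the pending add), then either region_add() commits the range or
 * region_abort() backs out the in-progress add.  For example, with an
 * existing map of [0, 2), region_chg(resv, 1, 4) returns 2 (indices 2 and 3
 * are not yet represented) and a subsequent region_add(resv, 1, 4) also
 * returns 2, leaving the map as the single region [0, 4).
 */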

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				spin_unlock(&resv->lock);
				nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;
			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	kfree(nrg);
	return del;
}
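
/*
 * For illustration of the cases above: with a single region [0, 8),
 * region_del(resv, 2, 6) hits the "must split" case; the original entry is
 * trimmed to [0, 2), a new descriptor [6, 8) is inserted after it, and 4 is
 * returned.  region_del(resv, 0, LONG_MAX) on the resulting map would
 * instead remove both entries outright and return 4, never needing an
 * allocation.
 */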

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
{
	struct hugepage_subpool *spool = subpool_inode(inode);
	long rsv_adjust;

	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
	if (restore_reserve && rsv_adjust) {
		struct hstate *h = hstate_inode(inode);

		hugetlb_acct_memory(h, 1);
	}
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg;
	long chg = 0;

	spin_lock(&resv->lock);
	/* Locate each segment we overlap with, and count that overlap. */
	list_for_each_entry(rg, head, link) {
		long seg_from;
		long seg_to;

		if (rg->to <= f)
			continue;
		if (rg->from >= t)
			break;

		seg_from = max(rg->from, f);
		seg_to = min(rg->to, t);

		chg += seg_to - seg_from;
	}
	spin_unlock(&resv->lock);

	return chg;
}
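
/*
 * Example: with regions [0, 2) and [5, 10) in the map,
 * region_count(resv, 1, 7) counts the overlaps [1, 2) and [5, 7)
 * and returns 3.
 */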

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	return ((address - vma->vm_start) >> huge_page_shift(h)) +
			(vma->vm_pgoff >> huge_page_order(h));
}
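
/*
 * For example, assuming 2MB huge pages on a 4KB base-page architecture
 * (huge_page_shift(h) == 21, huge_page_order(h) == 9), a fault at
 * vma->vm_start + 4MB in a mapping with vm_pgoff == 0 yields huge page
 * index (4MB >> 21) + 0 == 2.
 */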

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address)
{
	return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA.  In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
	struct hstate *hstate;

	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	hstate = hstate_vma(vma);

	return 1UL << huge_page_shift(hstate);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA.  In the majority
 * of cases, the page size used by the kernel matches the MMU size.  On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
	return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
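
/*
 * Because the resv_map is allocated with kmalloc(), the pointer stored in
 * vm_private_data is at least word aligned, so bits 0 and 1 are free to
 * carry HPAGE_RESV_OWNER and HPAGE_RESV_UNMAPPED.  The helpers below pack
 * the map pointer and the flags into one word, e.g.
 * (unsigned long)map | HPAGE_RESV_OWNER, and mask with ~HPAGE_RESV_MASK to
 * recover the pointer.
 */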

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held. It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and this persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
	return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
							unsigned long value)
{
	vma->vm_private_data = (void *)value;
}

struct resv_map *resv_map_alloc(void)
{
	struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
	struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

	if (!resv_map || !rg) {
		kfree(resv_map);
		kfree(rg);
		return NULL;
	}

	kref_init(&resv_map->refs);
	spin_lock_init(&resv_map->lock);
	INIT_LIST_HEAD(&resv_map->regions);

	resv_map->adds_in_progress = 0;

	INIT_LIST_HEAD(&resv_map->region_cache);
	list_add(&rg->link, &resv_map->region_cache);
	resv_map->region_cache_count = 1;

	return resv_map;
}

void resv_map_release(struct kref *ref)
{
	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
	struct list_head *head = &resv_map->region_cache;
	struct file_region *rg, *trg;

	/* Clear out any active regions before we release the map. */
	region_del(resv_map, 0, LONG_MAX);

	/* ... and any entries left in the cache */
	list_for_each_entry_safe(rg, trg, head, link) {
		list_del(&rg->link);
		kfree(rg);
	}

	VM_BUG_ON(resv_map->adds_in_progress);

	kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	return inode->i_mapping->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (vma->vm_flags & VM_MAYSHARE) {
		struct address_space *mapping = vma->vm_file->f_mapping;
		struct inode *inode = mapping->host;

		return inode_resv_map(inode);

	} else {
		return (struct resv_map *)(get_vma_private_data(vma) &
							~HPAGE_RESV_MASK);
	}
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, (get_vma_private_data(vma) &
				HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	if (!(vma->vm_flags & VM_MAYSHARE))
		vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
	if (vma->vm_flags & VM_NORESERVE) {
		/*
		 * This address is already reserved by another process
		 * (chg == 0), so we should decrement the reserved count.
		 * Without decrementing, the reserve count remains after
		 * releasing the inode, because this allocated page will go
		 * into the page cache and is regarded as coming from the
		 * reserved pool in the releasing step.  Currently, we don't
		 * have any other solution to deal with this situation
		 * properly, so add a work-around here.
		 */
		if (vma->vm_flags & VM_MAYSHARE && chg == 0)
			return true;
		else
			return false;
	}

	/* Shared mappings always use reserves */
	if (vma->vm_flags & VM_MAYSHARE) {
		/*
		 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
		 * be a region map for all pages.  The only situation where
		 * there is no region map is if a hole was punched via
		 * fallocate.  In this case, there really are no reserves to
		 * use.  This situation is indicated if chg != 0.
		 */
		if (chg)
			return false;
		else
			return true;
	}

	/*
	 * Only the process that called mmap() has reserves for
	 * private mappings.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return true;

	return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);
	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
		if (!is_migrate_isolate_page(page))
			break;
	/*
	 * If no non-isolated free hugepage is found on the list,
	 * the allocation fails.
	 */
	if (&h->hugepage_freelists[nid] == &page->lru)
		return NULL;
	list_move(&page->lru, &h->hugepage_activelist);
	set_page_refcounted(page);
	h->free_huge_pages--;
	h->free_huge_pages_node[nid]--;
	return page;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
	if (hugepages_treat_as_movable || hugepage_migration_supported(h))
		return GFP_HIGHUSER_MOVABLE;
	else
		return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
				struct vm_area_struct *vma,
				unsigned long address, int avoid_reserve,
				long chg)
{
	struct page *page = NULL;
	struct mempolicy *mpol;
	nodemask_t *nodemask;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	unsigned int cpuset_mems_cookie;

	/*
	 * A child process with MAP_PRIVATE mappings created by its parent
	 * has no page reserves.  This check ensures that reservations are
	 * not "stolen".  The child may still get SIGKILLed.
	 */
	if (!vma_has_reserves(vma, chg) &&
			h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

	/* If reserves cannot be used, ensure enough pages are in the pool */
	if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
		goto err;

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	zonelist = huge_zonelist(vma, address,
					htlb_alloc_mask(h), &mpol, &nodemask);

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
						MAX_NR_ZONES - 1, nodemask) {
		if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
			page = dequeue_huge_page_node(h, zone_to_nid(zone));
			if (page) {
				if (avoid_reserve)
					break;
				if (!vma_has_reserves(vma, chg))
					break;

				SetPagePrivate(page);
				h->resv_huge_pages--;
				break;
			}
		}
	}

	mpol_cond_put(mpol);
	if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;
	return page;

err:
	return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	nid = next_node_in(nid, *nodes_allowed);
	VM_BUG_ON(nid >= MAX_NUMNODES);

	return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
	if (!node_isset(nid, *nodes_allowed))
		nid = next_node_allowed(nid, nodes_allowed);
	return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
					nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
	h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

	return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
	int nid;

	VM_BUG_ON(!nodes_allowed);

	nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
	h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

	return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_alloc(hs, mask)) || 1);	\
		nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)		\
	for (nr_nodes = nodes_weight(*mask);				\
		nr_nodes > 0 &&						\
		((node = hstate_next_node_to_free(hs, mask)) || 1);	\
		nr_nodes--)
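
/*
 * Example of the round-robin behaviour above: with nodes_allowed containing
 * nodes {0, 2} and next_nid_to_alloc == 0, for_each_node_mask_to_alloc()
 * yields node 0 and then node 2, and the saved next_nid_to_alloc wraps back
 * to 0 for the next pool grow, so persistent huge pages are spread evenly
 * across the allowed nodes.
 */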

#if defined(CONFIG_X86_64) && ((defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA))
static void destroy_compound_gigantic_page(struct page *page,
					unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);
		set_page_refcounted(p);
	}

	set_compound_order(page, 0);
	__ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
	free_contig_range(page_to_pfn(page), 1 << order);
}

static int __alloc_gigantic_page(unsigned long start_pfn,
				unsigned long nr_pages)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
}

static bool pfn_range_valid_gigantic(struct zone *z,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		if (!pfn_valid(i))
			return false;

		page = pfn_to_page(i);

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}

	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
			unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;
	return zone_spans_pfn(zone, last_pfn);
}

static struct page *alloc_gigantic_page(int nid, unsigned int order)
{
	unsigned long nr_pages = 1 << order;
	unsigned long ret, pfn, flags;
	struct zone *z;

	z = NODE_DATA(nid)->node_zones;
	for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
		spin_lock_irqsave(&z->lock, flags);

		pfn = ALIGN(z->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(z, pfn, nr_pages)) {
			if (pfn_range_valid_gigantic(z, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point.  If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&z->lock, flags);
				ret = __alloc_gigantic_page(pfn, nr_pages);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&z->lock, flags);
			}
			pfn += nr_pages;
		}

		spin_unlock_irqrestore(&z->lock, flags);
	}

	return NULL;
}

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);

static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
{
	struct page *page;

	page = alloc_gigantic_page(nid, huge_page_order(h));
	if (page) {
		prep_compound_gigantic_page(page, huge_page_order(h));
		prep_new_huge_page(h, page, nid);
	}

	return page;
}

static int alloc_fresh_gigantic_page(struct hstate *h,
				nodemask_t *nodes_allowed)
{
	struct page *page = NULL;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_gigantic_page_node(h, node);
		if (page)
			return 1;
	}

	return 0;
}

static inline bool gigantic_page_supported(void) { return true; }
#else
static inline bool gigantic_page_supported(void) { return false; }
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
						unsigned int order) { }
static inline int alloc_fresh_gigantic_page(struct hstate *h,
					nodemask_t *nodes_allowed) { return 0; }
#endif

static void update_and_free_page(struct hstate *h, struct page *page)
{
	int i;

	if (hstate_is_gigantic(h) && !gigantic_page_supported())
		return;

	h->nr_huge_pages--;
	h->nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < pages_per_huge_page(h); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_private |
				1 << PG_writeback);
	}
	VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
	set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
	set_page_refcounted(page);
	if (hstate_is_gigantic(h)) {
		destroy_compound_gigantic_page(page, huge_page_order(h));
		free_gigantic_page(page, huge_page_order(h));
	} else {
		__free_pages(page, huge_page_order(h));
	}
}

struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
 */
bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	ClearPagePrivate(&page[1]);
}
  1029. void free_huge_page(struct page *page)
  1030. {
  1031. /*
  1032. * Can't pass hstate in here because it is called from the
  1033. * compound page destructor.
  1034. */
  1035. struct hstate *h = page_hstate(page);
  1036. int nid = page_to_nid(page);
  1037. struct hugepage_subpool *spool =
  1038. (struct hugepage_subpool *)page_private(page);
  1039. bool restore_reserve;
  1040. set_page_private(page, 0);
  1041. page->mapping = NULL;
  1042. VM_BUG_ON_PAGE(page_count(page), page);
  1043. VM_BUG_ON_PAGE(page_mapcount(page), page);
  1044. restore_reserve = PagePrivate(page);
  1045. ClearPagePrivate(page);
  1046. /*
  1047. * A return code of zero implies that the subpool will be under its
  1048. * minimum size if the reservation is not restored after page is free.
  1049. * Therefore, force restore_reserve operation.
  1050. */
  1051. if (hugepage_subpool_put_pages(spool, 1) == 0)
  1052. restore_reserve = true;
  1053. spin_lock(&hugetlb_lock);
  1054. clear_page_huge_active(page);
  1055. hugetlb_cgroup_uncharge_page(hstate_index(h),
  1056. pages_per_huge_page(h), page);
  1057. if (restore_reserve)
  1058. h->resv_huge_pages++;
  1059. if (h->surplus_huge_pages_node[nid]) {
  1060. /* remove the page from active list */
  1061. list_del(&page->lru);
  1062. update_and_free_page(h, page);
  1063. h->surplus_huge_pages--;
  1064. h->surplus_huge_pages_node[nid]--;
  1065. } else {
  1066. arch_clear_hugepage_flags(page);
  1067. enqueue_huge_page(h, page);
  1068. }
  1069. spin_unlock(&hugetlb_lock);
  1070. }
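/*
 * Editor's note (not part of the original source): free_huge_page() is the
 * HUGETLB_PAGE_DTOR compound destructor, so nothing calls it directly --
 * the final put_page() on a huge page reaches it through the compound page
 * destructor set up by set_compound_page_dtor() in prep_new_huge_page()
 * below.
 */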
  1071. static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
  1072. {
  1073. INIT_LIST_HEAD(&page->lru);
  1074. set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
  1075. spin_lock(&hugetlb_lock);
  1076. set_hugetlb_cgroup(page, NULL);
  1077. h->nr_huge_pages++;
  1078. h->nr_huge_pages_node[nid]++;
  1079. spin_unlock(&hugetlb_lock);
  1080. put_page(page); /* free it into the hugepage allocator */
  1081. }
  1082. static void prep_compound_gigantic_page(struct page *page, unsigned int order)
  1083. {
  1084. int i;
  1085. int nr_pages = 1 << order;
  1086. struct page *p = page + 1;
  1087. /* we rely on prep_new_huge_page to set the destructor */
  1088. set_compound_order(page, order);
  1089. __ClearPageReserved(page);
  1090. __SetPageHead(page);
  1091. for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
  1092. /*
  1093. * For gigantic hugepages allocated through bootmem at
  1094. * boot, it's safer to be consistent with the not-gigantic
  1095. * hugepages and clear the PG_reserved bit from all tail pages
1096. * too. Otherwise drivers using get_user_pages() to access tail
  1097. * pages may get the reference counting wrong if they see
  1098. * PG_reserved set on a tail page (despite the head page not
  1099. * having PG_reserved set). Enforcing this consistency between
  1100. * head and tail pages allows drivers to optimize away a check
1101. * on the head page when they need to know if put_page() is needed
  1102. * after get_user_pages().
  1103. */
  1104. __ClearPageReserved(p);
  1105. set_page_count(p, 0);
  1106. set_compound_head(p, page);
  1107. }
  1108. atomic_set(compound_mapcount_ptr(page), -1);
  1109. }
  1110. /*
  1111. * PageHuge() only returns true for hugetlbfs pages, but not for normal or
  1112. * transparent huge pages. See the PageTransHuge() documentation for more
  1113. * details.
  1114. */
  1115. int PageHuge(struct page *page)
  1116. {
  1117. if (!PageCompound(page))
  1118. return 0;
  1119. page = compound_head(page);
  1120. return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
  1121. }
  1122. EXPORT_SYMBOL_GPL(PageHuge);
  1123. /*
  1124. * PageHeadHuge() only returns true for hugetlbfs head page, but not for
  1125. * normal or transparent huge pages.
  1126. */
  1127. int PageHeadHuge(struct page *page_head)
  1128. {
  1129. if (!PageHead(page_head))
  1130. return 0;
  1131. return get_compound_page_dtor(page_head) == free_huge_page;
  1132. }
  1133. pgoff_t __basepage_index(struct page *page)
  1134. {
  1135. struct page *page_head = compound_head(page);
  1136. pgoff_t index = page_index(page_head);
  1137. unsigned long compound_idx;
  1138. if (!PageHuge(page_head))
  1139. return page_index(page);
  1140. if (compound_order(page_head) >= MAX_ORDER)
  1141. compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
  1142. else
  1143. compound_idx = page - page_head;
  1144. return (index << compound_order(page_head)) + compound_idx;
  1145. }
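/*
 * Worked example (editor's addition): for a 2 MB huge page on x86
 * (compound_order == 9) whose head page sits at page cache index 3, the
 * tail page five base pages past the head has compound_idx == 5, so
 * __basepage_index() returns (3 << 9) + 5 == 1541, i.e. that subpage's
 * offset expressed in 4 KB base pages.
 */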
  1146. static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
  1147. {
  1148. struct page *page;
  1149. page = __alloc_pages_node(nid,
  1150. htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
  1151. __GFP_REPEAT|__GFP_NOWARN,
  1152. huge_page_order(h));
  1153. if (page) {
  1154. prep_new_huge_page(h, page, nid);
  1155. }
  1156. return page;
  1157. }
  1158. static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
  1159. {
  1160. struct page *page;
  1161. int nr_nodes, node;
  1162. int ret = 0;
  1163. for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
  1164. page = alloc_fresh_huge_page_node(h, node);
  1165. if (page) {
  1166. ret = 1;
  1167. break;
  1168. }
  1169. }
  1170. if (ret)
  1171. count_vm_event(HTLB_BUDDY_PGALLOC);
  1172. else
  1173. count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
  1174. return ret;
  1175. }
  1176. /*
  1177. * Free huge page from pool from next node to free.
  1178. * Attempt to keep persistent huge pages more or less
  1179. * balanced over allowed nodes.
  1180. * Called with hugetlb_lock locked.
  1181. */
  1182. static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
  1183. bool acct_surplus)
  1184. {
  1185. int nr_nodes, node;
  1186. int ret = 0;
  1187. for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
  1188. /*
  1189. * If we're returning unused surplus pages, only examine
  1190. * nodes with surplus pages.
  1191. */
  1192. if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
  1193. !list_empty(&h->hugepage_freelists[node])) {
  1194. struct page *page =
  1195. list_entry(h->hugepage_freelists[node].next,
  1196. struct page, lru);
  1197. list_del(&page->lru);
  1198. h->free_huge_pages--;
  1199. h->free_huge_pages_node[node]--;
  1200. if (acct_surplus) {
  1201. h->surplus_huge_pages--;
  1202. h->surplus_huge_pages_node[node]--;
  1203. }
  1204. update_and_free_page(h, page);
  1205. ret = 1;
  1206. break;
  1207. }
  1208. }
  1209. return ret;
  1210. }
  1211. /*
  1212. * Dissolve a given free hugepage into free buddy pages. This function does
  1213. * nothing for in-use (including surplus) hugepages.
  1214. */
  1215. static void dissolve_free_huge_page(struct page *page)
  1216. {
  1217. spin_lock(&hugetlb_lock);
  1218. if (PageHuge(page) && !page_count(page)) {
  1219. struct hstate *h = page_hstate(page);
  1220. int nid = page_to_nid(page);
  1221. list_del(&page->lru);
  1222. h->free_huge_pages--;
  1223. h->free_huge_pages_node[nid]--;
  1224. update_and_free_page(h, page);
  1225. }
  1226. spin_unlock(&hugetlb_lock);
  1227. }
  1228. /*
  1229. * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
  1230. * make specified memory blocks removable from the system.
1231. * Note that start_pfn should be aligned with (minimum) hugepage size.
  1232. */
  1233. void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
  1234. {
  1235. unsigned long pfn;
  1236. if (!hugepages_supported())
  1237. return;
  1238. VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << minimum_order));
  1239. for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
  1240. dissolve_free_huge_page(pfn_to_page(pfn));
  1241. }
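/*
 * Context sketch (editor's addition): memory hotplug offlining is the
 * intended caller here; conceptually it runs
 *
 *	dissolve_free_huge_pages(start_pfn, end_pfn);
 *
 * over the range being offlined so that any free huge pages there are
 * handed back to the buddy allocator before the range is isolated.
 */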
  1242. /*
  1243. * There are 3 ways this can get called:
  1244. * 1. With vma+addr: we use the VMA's memory policy
  1245. * 2. With !vma, but nid=NUMA_NO_NODE: We try to allocate a huge
  1246. * page from any node, and let the buddy allocator itself figure
  1247. * it out.
  1248. * 3. With !vma, but nid!=NUMA_NO_NODE. We allocate a huge page
  1249. * strictly from 'nid'
  1250. */
  1251. static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
  1252. struct vm_area_struct *vma, unsigned long addr, int nid)
  1253. {
  1254. int order = huge_page_order(h);
  1255. gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
  1256. unsigned int cpuset_mems_cookie;
  1257. /*
  1258. * We need a VMA to get a memory policy. If we do not
  1259. * have one, we use the 'nid' argument.
  1260. *
  1261. * The mempolicy stuff below has some non-inlined bits
  1262. * and calls ->vm_ops. That makes it hard to optimize at
  1263. * compile-time, even when NUMA is off and it does
  1264. * nothing. This helps the compiler optimize it out.
  1265. */
  1266. if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
  1267. /*
  1268. * If a specific node is requested, make sure to
  1269. * get memory from there, but only when a node
  1270. * is explicitly specified.
  1271. */
  1272. if (nid != NUMA_NO_NODE)
  1273. gfp |= __GFP_THISNODE;
  1274. /*
  1275. * Make sure to call something that can handle
  1276. * nid=NUMA_NO_NODE
  1277. */
  1278. return alloc_pages_node(nid, gfp, order);
  1279. }
  1280. /*
  1281. * OK, so we have a VMA. Fetch the mempolicy and try to
  1282. * allocate a huge page with it. We will only reach this
  1283. * when CONFIG_NUMA=y.
  1284. */
  1285. do {
  1286. struct page *page;
  1287. struct mempolicy *mpol;
  1288. struct zonelist *zl;
  1289. nodemask_t *nodemask;
  1290. cpuset_mems_cookie = read_mems_allowed_begin();
  1291. zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
  1292. mpol_cond_put(mpol);
  1293. page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
  1294. if (page)
  1295. return page;
  1296. } while (read_mems_allowed_retry(cpuset_mems_cookie));
  1297. return NULL;
  1298. }
  1299. /*
  1300. * There are two ways to allocate a huge page:
  1301. * 1. When you have a VMA and an address (like a fault)
  1302. * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
  1303. *
  1304. * 'vma' and 'addr' are only for (1). 'nid' is always NUMA_NO_NODE in
  1305. * this case which signifies that the allocation should be done with
  1306. * respect for the VMA's memory policy.
  1307. *
  1308. * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1309. * implies that memory policies will not be taken into account.
  1310. */
  1311. static struct page *__alloc_buddy_huge_page(struct hstate *h,
  1312. struct vm_area_struct *vma, unsigned long addr, int nid)
  1313. {
  1314. struct page *page;
  1315. unsigned int r_nid;
  1316. if (hstate_is_gigantic(h))
  1317. return NULL;
  1318. /*
  1319. * Make sure that anyone specifying 'nid' is not also specifying a VMA.
  1320. * This makes sure the caller is picking _one_ of the modes with which
  1321. * we can call this function, not both.
  1322. */
  1323. if (vma || (addr != -1)) {
  1324. VM_WARN_ON_ONCE(addr == -1);
  1325. VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
  1326. }
  1327. /*
  1328. * Assume we will successfully allocate the surplus page to
  1329. * prevent racing processes from causing the surplus to exceed
  1330. * overcommit
  1331. *
  1332. * This however introduces a different race, where a process B
  1333. * tries to grow the static hugepage pool while alloc_pages() is
  1334. * called by process A. B will only examine the per-node
  1335. * counters in determining if surplus huge pages can be
  1336. * converted to normal huge pages in adjust_pool_surplus(). A
  1337. * won't be able to increment the per-node counter, until the
  1338. * lock is dropped by B, but B doesn't drop hugetlb_lock until
  1339. * no more huge pages can be converted from surplus to normal
  1340. * state (and doesn't try to convert again). Thus, we have a
  1341. * case where a surplus huge page exists, the pool is grown, and
  1342. * the surplus huge page still exists after, even though it
  1343. * should just have been converted to a normal huge page. This
  1344. * does not leak memory, though, as the hugepage will be freed
  1345. * once it is out of use. It also does not allow the counters to
  1346. * go out of whack in adjust_pool_surplus() as we don't modify
  1347. * the node values until we've gotten the hugepage and only the
  1348. * per-node value is checked there.
  1349. */
  1350. spin_lock(&hugetlb_lock);
  1351. if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
  1352. spin_unlock(&hugetlb_lock);
  1353. return NULL;
  1354. } else {
  1355. h->nr_huge_pages++;
  1356. h->surplus_huge_pages++;
  1357. }
  1358. spin_unlock(&hugetlb_lock);
  1359. page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
  1360. spin_lock(&hugetlb_lock);
  1361. if (page) {
  1362. INIT_LIST_HEAD(&page->lru);
  1363. r_nid = page_to_nid(page);
  1364. set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
  1365. set_hugetlb_cgroup(page, NULL);
  1366. /*
  1367. * We incremented the global counters already
  1368. */
  1369. h->nr_huge_pages_node[r_nid]++;
  1370. h->surplus_huge_pages_node[r_nid]++;
  1371. __count_vm_event(HTLB_BUDDY_PGALLOC);
  1372. } else {
  1373. h->nr_huge_pages--;
  1374. h->surplus_huge_pages--;
  1375. __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
  1376. }
  1377. spin_unlock(&hugetlb_lock);
  1378. return page;
  1379. }
  1380. /*
  1381. * Allocate a huge page from 'nid'. Note, 'nid' may be
  1382. * NUMA_NO_NODE, which means that it may be allocated
  1383. * anywhere.
  1384. */
  1385. static
  1386. struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
  1387. {
  1388. unsigned long addr = -1;
  1389. return __alloc_buddy_huge_page(h, NULL, addr, nid);
  1390. }
  1391. /*
  1392. * Use the VMA's mpolicy to allocate a huge page from the buddy.
  1393. */
  1394. static
  1395. struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
  1396. struct vm_area_struct *vma, unsigned long addr)
  1397. {
  1398. return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
  1399. }
  1400. /*
  1401. * This allocation function is useful in the context where vma is irrelevant.
1402. * E.g. soft-offlining uses this function because it only cares about the
1403. * physical address of the error page.
  1404. */
  1405. struct page *alloc_huge_page_node(struct hstate *h, int nid)
  1406. {
  1407. struct page *page = NULL;
  1408. spin_lock(&hugetlb_lock);
  1409. if (h->free_huge_pages - h->resv_huge_pages > 0)
  1410. page = dequeue_huge_page_node(h, nid);
  1411. spin_unlock(&hugetlb_lock);
  1412. if (!page)
  1413. page = __alloc_buddy_huge_page_no_mpol(h, nid);
  1414. return page;
  1415. }
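/*
 * Usage sketch (editor's addition): soft offlining in mm/memory-failure.c
 * is the kind of caller this is written for -- it only knows the node of
 * the poisoned page, so it asks for "any huge page on that node", roughly
 *
 *	new = alloc_huge_page_node(page_hstate(compound_head(page)), nid);
 *
 * with no VMA or mempolicy involved. The exact call site may differ; this
 * only illustrates the intended calling pattern.
 */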
  1416. /*
  1417. * Increase the hugetlb pool such that it can accommodate a reservation
  1418. * of size 'delta'.
  1419. */
  1420. static int gather_surplus_pages(struct hstate *h, int delta)
  1421. {
  1422. struct list_head surplus_list;
  1423. struct page *page, *tmp;
  1424. int ret, i;
  1425. int needed, allocated;
  1426. bool alloc_ok = true;
  1427. needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
  1428. if (needed <= 0) {
  1429. h->resv_huge_pages += delta;
  1430. return 0;
  1431. }
  1432. allocated = 0;
  1433. INIT_LIST_HEAD(&surplus_list);
  1434. ret = -ENOMEM;
  1435. retry:
  1436. spin_unlock(&hugetlb_lock);
  1437. for (i = 0; i < needed; i++) {
  1438. page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
  1439. if (!page) {
  1440. alloc_ok = false;
  1441. break;
  1442. }
  1443. list_add(&page->lru, &surplus_list);
  1444. }
  1445. allocated += i;
  1446. /*
  1447. * After retaking hugetlb_lock, we need to recalculate 'needed'
  1448. * because either resv_huge_pages or free_huge_pages may have changed.
  1449. */
  1450. spin_lock(&hugetlb_lock);
  1451. needed = (h->resv_huge_pages + delta) -
  1452. (h->free_huge_pages + allocated);
  1453. if (needed > 0) {
  1454. if (alloc_ok)
  1455. goto retry;
  1456. /*
  1457. * We were not able to allocate enough pages to
  1458. * satisfy the entire reservation so we free what
  1459. * we've allocated so far.
  1460. */
  1461. goto free;
  1462. }
  1463. /*
  1464. * The surplus_list now contains _at_least_ the number of extra pages
  1465. * needed to accommodate the reservation. Add the appropriate number
  1466. * of pages to the hugetlb pool and free the extras back to the buddy
  1467. * allocator. Commit the entire reservation here to prevent another
  1468. * process from stealing the pages as they are added to the pool but
  1469. * before they are reserved.
  1470. */
  1471. needed += allocated;
  1472. h->resv_huge_pages += delta;
  1473. ret = 0;
  1474. /* Free the needed pages to the hugetlb pool */
  1475. list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
  1476. if ((--needed) < 0)
  1477. break;
  1478. /*
  1479. * This page is now managed by the hugetlb allocator and has
  1480. * no users -- drop the buddy allocator's reference.
  1481. */
  1482. put_page_testzero(page);
  1483. VM_BUG_ON_PAGE(page_count(page), page);
  1484. enqueue_huge_page(h, page);
  1485. }
  1486. free:
  1487. spin_unlock(&hugetlb_lock);
  1488. /* Free unnecessary surplus pages to the buddy allocator */
  1489. list_for_each_entry_safe(page, tmp, &surplus_list, lru)
  1490. put_page(page);
  1491. spin_lock(&hugetlb_lock);
  1492. return ret;
  1493. }
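/*
 * Worked example of the 'needed' arithmetic above (editor's addition):
 * with resv_huge_pages == 10, free_huge_pages == 8 and delta == 4,
 *
 *	needed = (10 + 4) - 8 = 6
 *
 * so six surplus pages are requested from the buddy allocator. If, by the
 * time the lock is retaken, free_huge_pages has grown to 12, the
 * recalculation gives (10 + 4) - (12 + 6) = -4: two of the six freshly
 * allocated pages are enqueued into the pool and the remaining four are
 * handed back to the buddy allocator via the 'free:' path.
 */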
  1494. /*
  1495. * When releasing a hugetlb pool reservation, any surplus pages that were
  1496. * allocated to satisfy the reservation must be explicitly freed if they were
  1497. * never used.
  1498. * Called with hugetlb_lock held.
  1499. */
  1500. static void return_unused_surplus_pages(struct hstate *h,
  1501. unsigned long unused_resv_pages)
  1502. {
  1503. unsigned long nr_pages;
  1504. /* Uncommit the reservation */
  1505. h->resv_huge_pages -= unused_resv_pages;
  1506. /* Cannot return gigantic pages currently */
  1507. if (hstate_is_gigantic(h))
  1508. return;
  1509. nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
  1510. /*
  1511. * We want to release as many surplus pages as possible, spread
  1512. * evenly across all nodes with memory. Iterate across these nodes
  1513. * until we can no longer free unreserved surplus pages. This occurs
  1514. * when the nodes with surplus pages have no free pages.
1515. * free_pool_huge_page() will balance the freed pages across the
  1516. * on-line nodes with memory and will handle the hstate accounting.
  1517. */
  1518. while (nr_pages--) {
  1519. if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
  1520. break;
  1521. cond_resched_lock(&hugetlb_lock);
  1522. }
  1523. }
  1524. /*
  1525. * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
  1526. * are used by the huge page allocation routines to manage reservations.
  1527. *
  1528. * vma_needs_reservation is called to determine if the huge page at addr
  1529. * within the vma has an associated reservation. If a reservation is
  1530. * needed, the value 1 is returned. The caller is then responsible for
  1531. * managing the global reservation and subpool usage counts. After
  1532. * the huge page has been allocated, vma_commit_reservation is called
  1533. * to add the page to the reservation map. If the page allocation fails,
  1534. * the reservation must be ended instead of committed. vma_end_reservation
  1535. * is called in such cases.
  1536. *
  1537. * In the normal case, vma_commit_reservation returns the same value
  1538. * as the preceding vma_needs_reservation call. The only time this
  1539. * is not the case is if a reserve map was changed between calls. It
  1540. * is the responsibility of the caller to notice the difference and
  1541. * take appropriate action.
  1542. */
  1543. enum vma_resv_mode {
  1544. VMA_NEEDS_RESV,
  1545. VMA_COMMIT_RESV,
  1546. VMA_END_RESV,
  1547. };
  1548. static long __vma_reservation_common(struct hstate *h,
  1549. struct vm_area_struct *vma, unsigned long addr,
  1550. enum vma_resv_mode mode)
  1551. {
  1552. struct resv_map *resv;
  1553. pgoff_t idx;
  1554. long ret;
  1555. resv = vma_resv_map(vma);
  1556. if (!resv)
  1557. return 1;
  1558. idx = vma_hugecache_offset(h, vma, addr);
  1559. switch (mode) {
  1560. case VMA_NEEDS_RESV:
  1561. ret = region_chg(resv, idx, idx + 1);
  1562. break;
  1563. case VMA_COMMIT_RESV:
  1564. ret = region_add(resv, idx, idx + 1);
  1565. break;
  1566. case VMA_END_RESV:
  1567. region_abort(resv, idx, idx + 1);
  1568. ret = 0;
  1569. break;
  1570. default:
  1571. BUG();
  1572. }
  1573. if (vma->vm_flags & VM_MAYSHARE)
  1574. return ret;
  1575. else
  1576. return ret < 0 ? ret : 0;
  1577. }
  1578. static long vma_needs_reservation(struct hstate *h,
  1579. struct vm_area_struct *vma, unsigned long addr)
  1580. {
  1581. return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
  1582. }
  1583. static long vma_commit_reservation(struct hstate *h,
  1584. struct vm_area_struct *vma, unsigned long addr)
  1585. {
  1586. return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
  1587. }
  1588. static void vma_end_reservation(struct hstate *h,
  1589. struct vm_area_struct *vma, unsigned long addr)
  1590. {
  1591. (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
  1592. }
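/*
 * Calling-pattern sketch (editor's addition) for the three wrappers above,
 * mirroring what alloc_huge_page() does below:
 *
 *	chg = vma_needs_reservation(h, vma, addr);
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	page = ...allocate a huge page...;
 *	if (!page) {
 *		vma_end_reservation(h, vma, addr);
 *		return ERR_PTR(-ENOSPC);
 *	}
 *	vma_commit_reservation(h, vma, addr);
 *
 * i.e. every successful "needs" call must be paired with either a commit
 * (page obtained) or an end (allocation abandoned).
 */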
  1593. struct page *alloc_huge_page(struct vm_area_struct *vma,
  1594. unsigned long addr, int avoid_reserve)
  1595. {
  1596. struct hugepage_subpool *spool = subpool_vma(vma);
  1597. struct hstate *h = hstate_vma(vma);
  1598. struct page *page;
  1599. long map_chg, map_commit;
  1600. long gbl_chg;
  1601. int ret, idx;
  1602. struct hugetlb_cgroup *h_cg;
  1603. idx = hstate_index(h);
  1604. /*
  1605. * Examine the region/reserve map to determine if the process
  1606. * has a reservation for the page to be allocated. A return
  1607. * code of zero indicates a reservation exists (no change).
  1608. */
  1609. map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
  1610. if (map_chg < 0)
  1611. return ERR_PTR(-ENOMEM);
  1612. /*
  1613. * Processes that did not create the mapping will have no
  1614. * reserves as indicated by the region/reserve map. Check
  1615. * that the allocation will not exceed the subpool limit.
  1616. * Allocations for MAP_NORESERVE mappings also need to be
  1617. * checked against any subpool limit.
  1618. */
  1619. if (map_chg || avoid_reserve) {
  1620. gbl_chg = hugepage_subpool_get_pages(spool, 1);
  1621. if (gbl_chg < 0) {
  1622. vma_end_reservation(h, vma, addr);
  1623. return ERR_PTR(-ENOSPC);
  1624. }
  1625. /*
  1626. * Even though there was no reservation in the region/reserve
  1627. * map, there could be reservations associated with the
  1628. * subpool that can be used. This would be indicated if the
  1629. * return value of hugepage_subpool_get_pages() is zero.
  1630. * However, if avoid_reserve is specified we still avoid even
  1631. * the subpool reservations.
  1632. */
  1633. if (avoid_reserve)
  1634. gbl_chg = 1;
  1635. }
  1636. ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
  1637. if (ret)
  1638. goto out_subpool_put;
  1639. spin_lock(&hugetlb_lock);
  1640. /*
1641. * gbl_chg is passed to indicate whether or not a page must be taken
  1642. * from the global free pool (global change). gbl_chg == 0 indicates
  1643. * a reservation exists for the allocation.
  1644. */
  1645. page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
  1646. if (!page) {
  1647. spin_unlock(&hugetlb_lock);
  1648. page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
  1649. if (!page)
  1650. goto out_uncharge_cgroup;
  1651. if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
  1652. SetPagePrivate(page);
  1653. h->resv_huge_pages--;
  1654. }
  1655. spin_lock(&hugetlb_lock);
  1656. list_move(&page->lru, &h->hugepage_activelist);
  1657. /* Fall through */
  1658. }
  1659. hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
  1660. spin_unlock(&hugetlb_lock);
  1661. set_page_private(page, (unsigned long)spool);
  1662. map_commit = vma_commit_reservation(h, vma, addr);
  1663. if (unlikely(map_chg > map_commit)) {
  1664. /*
  1665. * The page was added to the reservation map between
  1666. * vma_needs_reservation and vma_commit_reservation.
  1667. * This indicates a race with hugetlb_reserve_pages.
  1668. * Adjust for the subpool count incremented above AND
  1669. * in hugetlb_reserve_pages for the same page. Also,
  1670. * the reservation count added in hugetlb_reserve_pages
  1671. * no longer applies.
  1672. */
  1673. long rsv_adjust;
  1674. rsv_adjust = hugepage_subpool_put_pages(spool, 1);
  1675. hugetlb_acct_memory(h, -rsv_adjust);
  1676. }
  1677. return page;
  1678. out_uncharge_cgroup:
  1679. hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
  1680. out_subpool_put:
  1681. if (map_chg || avoid_reserve)
  1682. hugepage_subpool_put_pages(spool, 1);
  1683. vma_end_reservation(h, vma, addr);
  1684. return ERR_PTR(-ENOSPC);
  1685. }
  1686. /*
  1687. * alloc_huge_page()'s wrapper which simply returns the page if allocation
  1688. * succeeds, otherwise NULL. This function is called from new_vma_page(),
  1689. * where no ERR_VALUE is expected to be returned.
  1690. */
  1691. struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
  1692. unsigned long addr, int avoid_reserve)
  1693. {
  1694. struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
  1695. if (IS_ERR(page))
  1696. page = NULL;
  1697. return page;
  1698. }
  1699. int __weak alloc_bootmem_huge_page(struct hstate *h)
  1700. {
  1701. struct huge_bootmem_page *m;
  1702. int nr_nodes, node;
  1703. for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
  1704. void *addr;
  1705. addr = memblock_virt_alloc_try_nid_nopanic(
  1706. huge_page_size(h), huge_page_size(h),
  1707. 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
  1708. if (addr) {
  1709. /*
  1710. * Use the beginning of the huge page to store the
  1711. * huge_bootmem_page struct (until gather_bootmem
  1712. * puts them into the mem_map).
  1713. */
  1714. m = addr;
  1715. goto found;
  1716. }
  1717. }
  1718. return 0;
  1719. found:
  1720. BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
  1721. /* Put them into a private list first because mem_map is not up yet */
  1722. list_add(&m->list, &huge_boot_pages);
  1723. m->hstate = h;
  1724. return 1;
  1725. }
  1726. static void __init prep_compound_huge_page(struct page *page,
  1727. unsigned int order)
  1728. {
  1729. if (unlikely(order > (MAX_ORDER - 1)))
  1730. prep_compound_gigantic_page(page, order);
  1731. else
  1732. prep_compound_page(page, order);
  1733. }
  1734. /* Put bootmem huge pages into the standard lists after mem_map is up */
  1735. static void __init gather_bootmem_prealloc(void)
  1736. {
  1737. struct huge_bootmem_page *m;
  1738. list_for_each_entry(m, &huge_boot_pages, list) {
  1739. struct hstate *h = m->hstate;
  1740. struct page *page;
  1741. #ifdef CONFIG_HIGHMEM
  1742. page = pfn_to_page(m->phys >> PAGE_SHIFT);
  1743. memblock_free_late(__pa(m),
  1744. sizeof(struct huge_bootmem_page));
  1745. #else
  1746. page = virt_to_page(m);
  1747. #endif
  1748. WARN_ON(page_count(page) != 1);
  1749. prep_compound_huge_page(page, h->order);
  1750. WARN_ON(PageReserved(page));
  1751. prep_new_huge_page(h, page, page_to_nid(page));
  1752. /*
  1753. * If we had gigantic hugepages allocated at boot time, we need
  1754. * to restore the 'stolen' pages to totalram_pages in order to
1755. * fix confusing memory reports from free(1) and other
  1756. * side-effects, like CommitLimit going negative.
  1757. */
  1758. if (hstate_is_gigantic(h))
  1759. adjust_managed_page_count(page, 1 << h->order);
  1760. }
  1761. }
  1762. static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
  1763. {
  1764. unsigned long i;
  1765. for (i = 0; i < h->max_huge_pages; ++i) {
  1766. if (hstate_is_gigantic(h)) {
  1767. if (!alloc_bootmem_huge_page(h))
  1768. break;
  1769. } else if (!alloc_fresh_huge_page(h,
  1770. &node_states[N_MEMORY]))
  1771. break;
  1772. }
  1773. h->max_huge_pages = i;
  1774. }
  1775. static void __init hugetlb_init_hstates(void)
  1776. {
  1777. struct hstate *h;
  1778. for_each_hstate(h) {
  1779. if (minimum_order > huge_page_order(h))
  1780. minimum_order = huge_page_order(h);
  1781. /* oversize hugepages were init'ed in early boot */
  1782. if (!hstate_is_gigantic(h))
  1783. hugetlb_hstate_alloc_pages(h);
  1784. }
  1785. VM_BUG_ON(minimum_order == UINT_MAX);
  1786. }
  1787. static char * __init memfmt(char *buf, unsigned long n)
  1788. {
  1789. if (n >= (1UL << 30))
  1790. sprintf(buf, "%lu GB", n >> 30);
  1791. else if (n >= (1UL << 20))
  1792. sprintf(buf, "%lu MB", n >> 20);
  1793. else
  1794. sprintf(buf, "%lu KB", n >> 10);
  1795. return buf;
  1796. }
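/*
 * Example (editor's addition): memfmt(buf, 2UL << 20) produces "2 MB" and
 * memfmt(buf, 1UL << 30) produces "1 GB"; these strings feed the
 * report_hugepages() boot message below.
 */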
  1797. static void __init report_hugepages(void)
  1798. {
  1799. struct hstate *h;
  1800. for_each_hstate(h) {
  1801. char buf[32];
  1802. pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
  1803. memfmt(buf, huge_page_size(h)),
  1804. h->free_huge_pages);
  1805. }
  1806. }
  1807. #ifdef CONFIG_HIGHMEM
  1808. static void try_to_free_low(struct hstate *h, unsigned long count,
  1809. nodemask_t *nodes_allowed)
  1810. {
  1811. int i;
  1812. if (hstate_is_gigantic(h))
  1813. return;
  1814. for_each_node_mask(i, *nodes_allowed) {
  1815. struct page *page, *next;
  1816. struct list_head *freel = &h->hugepage_freelists[i];
  1817. list_for_each_entry_safe(page, next, freel, lru) {
  1818. if (count >= h->nr_huge_pages)
  1819. return;
  1820. if (PageHighMem(page))
  1821. continue;
  1822. list_del(&page->lru);
  1823. update_and_free_page(h, page);
  1824. h->free_huge_pages--;
  1825. h->free_huge_pages_node[page_to_nid(page)]--;
  1826. }
  1827. }
  1828. }
  1829. #else
  1830. static inline void try_to_free_low(struct hstate *h, unsigned long count,
  1831. nodemask_t *nodes_allowed)
  1832. {
  1833. }
  1834. #endif
  1835. /*
  1836. * Increment or decrement surplus_huge_pages. Keep node-specific counters
  1837. * balanced by operating on them in a round-robin fashion.
  1838. * Returns 1 if an adjustment was made.
  1839. */
  1840. static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
  1841. int delta)
  1842. {
  1843. int nr_nodes, node;
  1844. VM_BUG_ON(delta != -1 && delta != 1);
  1845. if (delta < 0) {
  1846. for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
  1847. if (h->surplus_huge_pages_node[node])
  1848. goto found;
  1849. }
  1850. } else {
  1851. for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
  1852. if (h->surplus_huge_pages_node[node] <
  1853. h->nr_huge_pages_node[node])
  1854. goto found;
  1855. }
  1856. }
  1857. return 0;
  1858. found:
  1859. h->surplus_huge_pages += delta;
  1860. h->surplus_huge_pages_node[node] += delta;
  1861. return 1;
  1862. }
  1863. #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
  1864. static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
  1865. nodemask_t *nodes_allowed)
  1866. {
  1867. unsigned long min_count, ret;
  1868. if (hstate_is_gigantic(h) && !gigantic_page_supported())
  1869. return h->max_huge_pages;
  1870. /*
  1871. * Increase the pool size
  1872. * First take pages out of surplus state. Then make up the
  1873. * remaining difference by allocating fresh huge pages.
  1874. *
  1875. * We might race with __alloc_buddy_huge_page() here and be unable
  1876. * to convert a surplus huge page to a normal huge page. That is
  1877. * not critical, though, it just means the overall size of the
  1878. * pool might be one hugepage larger than it needs to be, but
  1879. * within all the constraints specified by the sysctls.
  1880. */
  1881. spin_lock(&hugetlb_lock);
  1882. while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
  1883. if (!adjust_pool_surplus(h, nodes_allowed, -1))
  1884. break;
  1885. }
  1886. while (count > persistent_huge_pages(h)) {
  1887. /*
  1888. * If this allocation races such that we no longer need the
  1889. * page, free_huge_page will handle it by freeing the page
  1890. * and reducing the surplus.
  1891. */
  1892. spin_unlock(&hugetlb_lock);
  1893. if (hstate_is_gigantic(h))
  1894. ret = alloc_fresh_gigantic_page(h, nodes_allowed);
  1895. else
  1896. ret = alloc_fresh_huge_page(h, nodes_allowed);
  1897. spin_lock(&hugetlb_lock);
  1898. if (!ret)
  1899. goto out;
  1900. /* Bail for signals. Probably ctrl-c from user */
  1901. if (signal_pending(current))
  1902. goto out;
  1903. }
  1904. /*
  1905. * Decrease the pool size
  1906. * First return free pages to the buddy allocator (being careful
  1907. * to keep enough around to satisfy reservations). Then place
  1908. * pages into surplus state as needed so the pool will shrink
  1909. * to the desired size as pages become free.
  1910. *
  1911. * By placing pages into the surplus state independent of the
  1912. * overcommit value, we are allowing the surplus pool size to
  1913. * exceed overcommit. There are few sane options here. Since
  1914. * __alloc_buddy_huge_page() is checking the global counter,
  1915. * though, we'll note that we're not allowed to exceed surplus
  1916. * and won't grow the pool anywhere else. Not until one of the
  1917. * sysctls are changed, or the surplus pages go out of use.
  1918. */
  1919. min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
  1920. min_count = max(count, min_count);
  1921. try_to_free_low(h, min_count, nodes_allowed);
  1922. while (min_count < persistent_huge_pages(h)) {
  1923. if (!free_pool_huge_page(h, nodes_allowed, 0))
  1924. break;
  1925. cond_resched_lock(&hugetlb_lock);
  1926. }
  1927. while (count < persistent_huge_pages(h)) {
  1928. if (!adjust_pool_surplus(h, nodes_allowed, 1))
  1929. break;
  1930. }
  1931. out:
  1932. ret = persistent_huge_pages(h);
  1933. spin_unlock(&hugetlb_lock);
  1934. return ret;
  1935. }
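/*
 * How this is reached (editor's note): the sysctl and sysfs knobs funnel
 * into set_max_huge_pages(), e.g.
 *
 *	echo 512 > /proc/sys/vm/nr_hugepages
 *	echo 64 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * The first resizes the default hstate across all allowed nodes; the
 * second resizes only the 2 MB hstate. The per-node variant under
 * /sys/devices/system/node/nodeN/hugepages/ restricts nodes_allowed to
 * that single node.
 */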
  1936. #define HSTATE_ATTR_RO(_name) \
  1937. static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
  1938. #define HSTATE_ATTR(_name) \
  1939. static struct kobj_attribute _name##_attr = \
  1940. __ATTR(_name, 0644, _name##_show, _name##_store)
  1941. static struct kobject *hugepages_kobj;
  1942. static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
  1943. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
  1944. static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
  1945. {
  1946. int i;
  1947. for (i = 0; i < HUGE_MAX_HSTATE; i++)
  1948. if (hstate_kobjs[i] == kobj) {
  1949. if (nidp)
  1950. *nidp = NUMA_NO_NODE;
  1951. return &hstates[i];
  1952. }
  1953. return kobj_to_node_hstate(kobj, nidp);
  1954. }
  1955. static ssize_t nr_hugepages_show_common(struct kobject *kobj,
  1956. struct kobj_attribute *attr, char *buf)
  1957. {
  1958. struct hstate *h;
  1959. unsigned long nr_huge_pages;
  1960. int nid;
  1961. h = kobj_to_hstate(kobj, &nid);
  1962. if (nid == NUMA_NO_NODE)
  1963. nr_huge_pages = h->nr_huge_pages;
  1964. else
  1965. nr_huge_pages = h->nr_huge_pages_node[nid];
  1966. return sprintf(buf, "%lu\n", nr_huge_pages);
  1967. }
  1968. static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
  1969. struct hstate *h, int nid,
  1970. unsigned long count, size_t len)
  1971. {
  1972. int err;
  1973. NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
  1974. if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
  1975. err = -EINVAL;
  1976. goto out;
  1977. }
  1978. if (nid == NUMA_NO_NODE) {
  1979. /*
  1980. * global hstate attribute
  1981. */
  1982. if (!(obey_mempolicy &&
  1983. init_nodemask_of_mempolicy(nodes_allowed))) {
  1984. NODEMASK_FREE(nodes_allowed);
  1985. nodes_allowed = &node_states[N_MEMORY];
  1986. }
  1987. } else if (nodes_allowed) {
  1988. /*
  1989. * per node hstate attribute: adjust count to global,
  1990. * but restrict alloc/free to the specified node.
  1991. */
  1992. count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
  1993. init_nodemask_of_node(nodes_allowed, nid);
  1994. } else
  1995. nodes_allowed = &node_states[N_MEMORY];
  1996. h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
  1997. if (nodes_allowed != &node_states[N_MEMORY])
  1998. NODEMASK_FREE(nodes_allowed);
  1999. return len;
  2000. out:
  2001. NODEMASK_FREE(nodes_allowed);
  2002. return err;
  2003. }
  2004. static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
  2005. struct kobject *kobj, const char *buf,
  2006. size_t len)
  2007. {
  2008. struct hstate *h;
  2009. unsigned long count;
  2010. int nid;
  2011. int err;
  2012. err = kstrtoul(buf, 10, &count);
  2013. if (err)
  2014. return err;
  2015. h = kobj_to_hstate(kobj, &nid);
  2016. return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
  2017. }
  2018. static ssize_t nr_hugepages_show(struct kobject *kobj,
  2019. struct kobj_attribute *attr, char *buf)
  2020. {
  2021. return nr_hugepages_show_common(kobj, attr, buf);
  2022. }
  2023. static ssize_t nr_hugepages_store(struct kobject *kobj,
  2024. struct kobj_attribute *attr, const char *buf, size_t len)
  2025. {
  2026. return nr_hugepages_store_common(false, kobj, buf, len);
  2027. }
  2028. HSTATE_ATTR(nr_hugepages);
  2029. #ifdef CONFIG_NUMA
  2030. /*
  2031. * hstate attribute for optionally mempolicy-based constraint on persistent
  2032. * huge page alloc/free.
  2033. */
  2034. static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
  2035. struct kobj_attribute *attr, char *buf)
  2036. {
  2037. return nr_hugepages_show_common(kobj, attr, buf);
  2038. }
  2039. static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
  2040. struct kobj_attribute *attr, const char *buf, size_t len)
  2041. {
  2042. return nr_hugepages_store_common(true, kobj, buf, len);
  2043. }
  2044. HSTATE_ATTR(nr_hugepages_mempolicy);
  2045. #endif
  2046. static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
  2047. struct kobj_attribute *attr, char *buf)
  2048. {
  2049. struct hstate *h = kobj_to_hstate(kobj, NULL);
  2050. return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
  2051. }
  2052. static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
  2053. struct kobj_attribute *attr, const char *buf, size_t count)
  2054. {
  2055. int err;
  2056. unsigned long input;
  2057. struct hstate *h = kobj_to_hstate(kobj, NULL);
  2058. if (hstate_is_gigantic(h))
  2059. return -EINVAL;
  2060. err = kstrtoul(buf, 10, &input);
  2061. if (err)
  2062. return err;
  2063. spin_lock(&hugetlb_lock);
  2064. h->nr_overcommit_huge_pages = input;
  2065. spin_unlock(&hugetlb_lock);
  2066. return count;
  2067. }
  2068. HSTATE_ATTR(nr_overcommit_hugepages);
  2069. static ssize_t free_hugepages_show(struct kobject *kobj,
  2070. struct kobj_attribute *attr, char *buf)
  2071. {
  2072. struct hstate *h;
  2073. unsigned long free_huge_pages;
  2074. int nid;
  2075. h = kobj_to_hstate(kobj, &nid);
  2076. if (nid == NUMA_NO_NODE)
  2077. free_huge_pages = h->free_huge_pages;
  2078. else
  2079. free_huge_pages = h->free_huge_pages_node[nid];
  2080. return sprintf(buf, "%lu\n", free_huge_pages);
  2081. }
  2082. HSTATE_ATTR_RO(free_hugepages);
  2083. static ssize_t resv_hugepages_show(struct kobject *kobj,
  2084. struct kobj_attribute *attr, char *buf)
  2085. {
  2086. struct hstate *h = kobj_to_hstate(kobj, NULL);
  2087. return sprintf(buf, "%lu\n", h->resv_huge_pages);
  2088. }
  2089. HSTATE_ATTR_RO(resv_hugepages);
  2090. static ssize_t surplus_hugepages_show(struct kobject *kobj,
  2091. struct kobj_attribute *attr, char *buf)
  2092. {
  2093. struct hstate *h;
  2094. unsigned long surplus_huge_pages;
  2095. int nid;
  2096. h = kobj_to_hstate(kobj, &nid);
  2097. if (nid == NUMA_NO_NODE)
  2098. surplus_huge_pages = h->surplus_huge_pages;
  2099. else
  2100. surplus_huge_pages = h->surplus_huge_pages_node[nid];
  2101. return sprintf(buf, "%lu\n", surplus_huge_pages);
  2102. }
  2103. HSTATE_ATTR_RO(surplus_hugepages);
  2104. static struct attribute *hstate_attrs[] = {
  2105. &nr_hugepages_attr.attr,
  2106. &nr_overcommit_hugepages_attr.attr,
  2107. &free_hugepages_attr.attr,
  2108. &resv_hugepages_attr.attr,
  2109. &surplus_hugepages_attr.attr,
  2110. #ifdef CONFIG_NUMA
  2111. &nr_hugepages_mempolicy_attr.attr,
  2112. #endif
  2113. NULL,
  2114. };
  2115. static struct attribute_group hstate_attr_group = {
  2116. .attrs = hstate_attrs,
  2117. };
  2118. static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
  2119. struct kobject **hstate_kobjs,
  2120. struct attribute_group *hstate_attr_group)
  2121. {
  2122. int retval;
  2123. int hi = hstate_index(h);
  2124. hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
  2125. if (!hstate_kobjs[hi])
  2126. return -ENOMEM;
  2127. retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
  2128. if (retval)
  2129. kobject_put(hstate_kobjs[hi]);
  2130. return retval;
  2131. }
  2132. static void __init hugetlb_sysfs_init(void)
  2133. {
  2134. struct hstate *h;
  2135. int err;
  2136. hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
  2137. if (!hugepages_kobj)
  2138. return;
  2139. for_each_hstate(h) {
  2140. err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
  2141. hstate_kobjs, &hstate_attr_group);
  2142. if (err)
2143. pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
  2144. }
  2145. }
  2146. #ifdef CONFIG_NUMA
  2147. /*
  2148. * node_hstate/s - associate per node hstate attributes, via their kobjects,
  2149. * with node devices in node_devices[] using a parallel array. The array
  2150. * index of a node device or _hstate == node id.
  2151. * This is here to avoid any static dependency of the node device driver, in
  2152. * the base kernel, on the hugetlb module.
  2153. */
  2154. struct node_hstate {
  2155. struct kobject *hugepages_kobj;
  2156. struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
  2157. };
  2158. static struct node_hstate node_hstates[MAX_NUMNODES];
  2159. /*
  2160. * A subset of global hstate attributes for node devices
  2161. */
  2162. static struct attribute *per_node_hstate_attrs[] = {
  2163. &nr_hugepages_attr.attr,
  2164. &free_hugepages_attr.attr,
  2165. &surplus_hugepages_attr.attr,
  2166. NULL,
  2167. };
  2168. static struct attribute_group per_node_hstate_attr_group = {
  2169. .attrs = per_node_hstate_attrs,
  2170. };
  2171. /*
  2172. * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
  2173. * Returns node id via non-NULL nidp.
  2174. */
  2175. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
  2176. {
  2177. int nid;
  2178. for (nid = 0; nid < nr_node_ids; nid++) {
  2179. struct node_hstate *nhs = &node_hstates[nid];
  2180. int i;
  2181. for (i = 0; i < HUGE_MAX_HSTATE; i++)
  2182. if (nhs->hstate_kobjs[i] == kobj) {
  2183. if (nidp)
  2184. *nidp = nid;
  2185. return &hstates[i];
  2186. }
  2187. }
  2188. BUG();
  2189. return NULL;
  2190. }
  2191. /*
  2192. * Unregister hstate attributes from a single node device.
  2193. * No-op if no hstate attributes attached.
  2194. */
  2195. static void hugetlb_unregister_node(struct node *node)
  2196. {
  2197. struct hstate *h;
  2198. struct node_hstate *nhs = &node_hstates[node->dev.id];
  2199. if (!nhs->hugepages_kobj)
  2200. return; /* no hstate attributes */
  2201. for_each_hstate(h) {
  2202. int idx = hstate_index(h);
  2203. if (nhs->hstate_kobjs[idx]) {
  2204. kobject_put(nhs->hstate_kobjs[idx]);
  2205. nhs->hstate_kobjs[idx] = NULL;
  2206. }
  2207. }
  2208. kobject_put(nhs->hugepages_kobj);
  2209. nhs->hugepages_kobj = NULL;
  2210. }
  2211. /*
  2212. * Register hstate attributes for a single node device.
  2213. * No-op if attributes already registered.
  2214. */
  2215. static void hugetlb_register_node(struct node *node)
  2216. {
  2217. struct hstate *h;
  2218. struct node_hstate *nhs = &node_hstates[node->dev.id];
  2219. int err;
  2220. if (nhs->hugepages_kobj)
  2221. return; /* already allocated */
  2222. nhs->hugepages_kobj = kobject_create_and_add("hugepages",
  2223. &node->dev.kobj);
  2224. if (!nhs->hugepages_kobj)
  2225. return;
  2226. for_each_hstate(h) {
  2227. err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
  2228. nhs->hstate_kobjs,
  2229. &per_node_hstate_attr_group);
  2230. if (err) {
  2231. pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
  2232. h->name, node->dev.id);
  2233. hugetlb_unregister_node(node);
  2234. break;
  2235. }
  2236. }
  2237. }
  2238. /*
  2239. * hugetlb init time: register hstate attributes for all registered node
  2240. * devices of nodes that have memory. All on-line nodes should have
  2241. * registered their associated device by this time.
  2242. */
  2243. static void __init hugetlb_register_all_nodes(void)
  2244. {
  2245. int nid;
  2246. for_each_node_state(nid, N_MEMORY) {
  2247. struct node *node = node_devices[nid];
  2248. if (node->dev.id == nid)
  2249. hugetlb_register_node(node);
  2250. }
  2251. /*
  2252. * Let the node device driver know we're here so it can
  2253. * [un]register hstate attributes on node hotplug.
  2254. */
  2255. register_hugetlbfs_with_node(hugetlb_register_node,
  2256. hugetlb_unregister_node);
  2257. }
  2258. #else /* !CONFIG_NUMA */
  2259. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
  2260. {
  2261. BUG();
  2262. if (nidp)
  2263. *nidp = -1;
  2264. return NULL;
  2265. }
  2266. static void hugetlb_register_all_nodes(void) { }
  2267. #endif
  2268. static int __init hugetlb_init(void)
  2269. {
  2270. int i;
  2271. if (!hugepages_supported())
  2272. return 0;
  2273. if (!size_to_hstate(default_hstate_size)) {
  2274. default_hstate_size = HPAGE_SIZE;
  2275. if (!size_to_hstate(default_hstate_size))
  2276. hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
  2277. }
  2278. default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
  2279. if (default_hstate_max_huge_pages) {
  2280. if (!default_hstate.max_huge_pages)
  2281. default_hstate.max_huge_pages = default_hstate_max_huge_pages;
  2282. }
  2283. hugetlb_init_hstates();
  2284. gather_bootmem_prealloc();
  2285. report_hugepages();
  2286. hugetlb_sysfs_init();
  2287. hugetlb_register_all_nodes();
  2288. hugetlb_cgroup_file_init();
  2289. #ifdef CONFIG_SMP
  2290. num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
  2291. #else
  2292. num_fault_mutexes = 1;
  2293. #endif
  2294. hugetlb_fault_mutex_table =
  2295. kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
  2296. BUG_ON(!hugetlb_fault_mutex_table);
  2297. for (i = 0; i < num_fault_mutexes; i++)
  2298. mutex_init(&hugetlb_fault_mutex_table[i]);
  2299. return 0;
  2300. }
  2301. subsys_initcall(hugetlb_init);
  2302. /* Should be called on processing a hugepagesz=... option */
  2303. void __init hugetlb_bad_size(void)
  2304. {
  2305. parsed_valid_hugepagesz = false;
  2306. }
  2307. void __init hugetlb_add_hstate(unsigned int order)
  2308. {
  2309. struct hstate *h;
  2310. unsigned long i;
  2311. if (size_to_hstate(PAGE_SIZE << order)) {
  2312. pr_warn("hugepagesz= specified twice, ignoring\n");
  2313. return;
  2314. }
  2315. BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
  2316. BUG_ON(order == 0);
  2317. h = &hstates[hugetlb_max_hstate++];
  2318. h->order = order;
  2319. h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
  2320. h->nr_huge_pages = 0;
  2321. h->free_huge_pages = 0;
  2322. for (i = 0; i < MAX_NUMNODES; ++i)
  2323. INIT_LIST_HEAD(&h->hugepage_freelists[i]);
  2324. INIT_LIST_HEAD(&h->hugepage_activelist);
  2325. h->next_nid_to_alloc = first_memory_node;
  2326. h->next_nid_to_free = first_memory_node;
  2327. snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
  2328. huge_page_size(h)/1024);
  2329. parsed_hstate = h;
  2330. }
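/*
 * Example caller (editor's addition; the architecture code is not part of
 * this file): an arch's hugepagesz= handler typically registers its sizes
 * with something like
 *
 *	hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);	(2 MB on x86)
 *	hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);	(1 GB on x86)
 *
 * i.e. it passes the huge page order relative to the base page size.
 */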
  2331. static int __init hugetlb_nrpages_setup(char *s)
  2332. {
  2333. unsigned long *mhp;
  2334. static unsigned long *last_mhp;
  2335. if (!parsed_valid_hugepagesz) {
  2336. pr_warn("hugepages = %s preceded by "
  2337. "an unsupported hugepagesz, ignoring\n", s);
  2338. parsed_valid_hugepagesz = true;
  2339. return 1;
  2340. }
  2341. /*
  2342. * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
  2343. * so this hugepages= parameter goes to the "default hstate".
  2344. */
  2345. else if (!hugetlb_max_hstate)
  2346. mhp = &default_hstate_max_huge_pages;
  2347. else
  2348. mhp = &parsed_hstate->max_huge_pages;
  2349. if (mhp == last_mhp) {
  2350. pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
  2351. return 1;
  2352. }
  2353. if (sscanf(s, "%lu", mhp) <= 0)
  2354. *mhp = 0;
  2355. /*
  2356. * Global state is always initialized later in hugetlb_init.
  2357. * But we need to allocate >= MAX_ORDER hstates here early to still
  2358. * use the bootmem allocator.
  2359. */
  2360. if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
  2361. hugetlb_hstate_alloc_pages(parsed_hstate);
  2362. last_mhp = mhp;
  2363. return 1;
  2364. }
  2365. __setup("hugepages=", hugetlb_nrpages_setup);
  2366. static int __init hugetlb_default_setup(char *s)
  2367. {
  2368. default_hstate_size = memparse(s, &s);
  2369. return 1;
  2370. }
  2371. __setup("default_hugepagesz=", hugetlb_default_setup);
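/*
 * Boot-time example (editor's addition) combining these handlers with the
 * arch's hugepagesz= handler:
 *
 *	default_hugepagesz=1G hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * registers the 1 GB and 2 MB hstates, grabs the four gigantic pages from
 * bootmem while parsing (order >= MAX_ORDER, see hugetlb_nrpages_setup),
 * allocates the 512 2 MB pages later in hugetlb_init_hstates(), and makes
 * 1 GB the default size for users that do not ask for a specific size.
 */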
  2372. static unsigned int cpuset_mems_nr(unsigned int *array)
  2373. {
  2374. int node;
  2375. unsigned int nr = 0;
  2376. for_each_node_mask(node, cpuset_current_mems_allowed)
  2377. nr += array[node];
  2378. return nr;
  2379. }
  2380. #ifdef CONFIG_SYSCTL
  2381. static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
  2382. struct ctl_table *table, int write,
  2383. void __user *buffer, size_t *length, loff_t *ppos)
  2384. {
  2385. struct hstate *h = &default_hstate;
  2386. unsigned long tmp = h->max_huge_pages;
  2387. int ret;
  2388. if (!hugepages_supported())
  2389. return -EOPNOTSUPP;
  2390. table->data = &tmp;
  2391. table->maxlen = sizeof(unsigned long);
  2392. ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
  2393. if (ret)
  2394. goto out;
  2395. if (write)
  2396. ret = __nr_hugepages_store_common(obey_mempolicy, h,
  2397. NUMA_NO_NODE, tmp, *length);
  2398. out:
  2399. return ret;
  2400. }
  2401. int hugetlb_sysctl_handler(struct ctl_table *table, int write,
  2402. void __user *buffer, size_t *length, loff_t *ppos)
  2403. {
  2404. return hugetlb_sysctl_handler_common(false, table, write,
  2405. buffer, length, ppos);
  2406. }
  2407. #ifdef CONFIG_NUMA
  2408. int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
  2409. void __user *buffer, size_t *length, loff_t *ppos)
  2410. {
  2411. return hugetlb_sysctl_handler_common(true, table, write,
  2412. buffer, length, ppos);
  2413. }
  2414. #endif /* CONFIG_NUMA */
  2415. int hugetlb_overcommit_handler(struct ctl_table *table, int write,
  2416. void __user *buffer,
  2417. size_t *length, loff_t *ppos)
  2418. {
  2419. struct hstate *h = &default_hstate;
  2420. unsigned long tmp;
  2421. int ret;
  2422. if (!hugepages_supported())
  2423. return -EOPNOTSUPP;
  2424. tmp = h->nr_overcommit_huge_pages;
  2425. if (write && hstate_is_gigantic(h))
  2426. return -EINVAL;
  2427. table->data = &tmp;
  2428. table->maxlen = sizeof(unsigned long);
  2429. ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
  2430. if (ret)
  2431. goto out;
  2432. if (write) {
  2433. spin_lock(&hugetlb_lock);
  2434. h->nr_overcommit_huge_pages = tmp;
  2435. spin_unlock(&hugetlb_lock);
  2436. }
  2437. out:
  2438. return ret;
  2439. }
  2440. #endif /* CONFIG_SYSCTL */
  2441. void hugetlb_report_meminfo(struct seq_file *m)
  2442. {
  2443. struct hstate *h = &default_hstate;
  2444. if (!hugepages_supported())
  2445. return;
  2446. seq_printf(m,
  2447. "HugePages_Total: %5lu\n"
  2448. "HugePages_Free: %5lu\n"
  2449. "HugePages_Rsvd: %5lu\n"
  2450. "HugePages_Surp: %5lu\n"
  2451. "Hugepagesize: %8lu kB\n",
  2452. h->nr_huge_pages,
  2453. h->free_huge_pages,
  2454. h->resv_huge_pages,
  2455. h->surplus_huge_pages,
  2456. 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
  2457. }
  2458. int hugetlb_report_node_meminfo(int nid, char *buf)
  2459. {
  2460. struct hstate *h = &default_hstate;
  2461. if (!hugepages_supported())
  2462. return 0;
  2463. return sprintf(buf,
  2464. "Node %d HugePages_Total: %5u\n"
  2465. "Node %d HugePages_Free: %5u\n"
  2466. "Node %d HugePages_Surp: %5u\n",
  2467. nid, h->nr_huge_pages_node[nid],
  2468. nid, h->free_huge_pages_node[nid],
  2469. nid, h->surplus_huge_pages_node[nid]);
  2470. }
  2471. void hugetlb_show_meminfo(void)
  2472. {
  2473. struct hstate *h;
  2474. int nid;
  2475. if (!hugepages_supported())
  2476. return;
  2477. for_each_node_state(nid, N_MEMORY)
  2478. for_each_hstate(h)
  2479. pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
  2480. nid,
  2481. h->nr_huge_pages_node[nid],
  2482. h->free_huge_pages_node[nid],
  2483. h->surplus_huge_pages_node[nid],
  2484. 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
  2485. }
  2486. void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
  2487. {
  2488. seq_printf(m, "HugetlbPages:\t%8lu kB\n",
  2489. atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
  2490. }
2491. /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
  2492. unsigned long hugetlb_total_pages(void)
  2493. {
  2494. struct hstate *h;
  2495. unsigned long nr_total_pages = 0;
  2496. for_each_hstate(h)
  2497. nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
  2498. return nr_total_pages;
  2499. }
  2500. static int hugetlb_acct_memory(struct hstate *h, long delta)
  2501. {
  2502. int ret = -ENOMEM;
  2503. spin_lock(&hugetlb_lock);
  2504. /*
  2505. * When cpuset is configured, it breaks the strict hugetlb page
  2506. * reservation as the accounting is done on a global variable. Such
  2507. * reservation is completely rubbish in the presence of cpuset because
  2508. * the reservation is not checked against page availability for the
2509. * current cpuset. An application can still potentially be OOM'ed by the
2510. * kernel due to a lack of free hugetlb pages in the cpuset the task is in.
2511. * Attempting to enforce strict accounting with cpuset is almost
2512. * impossible (or too ugly) because cpuset is so fluid that a
2513. * task or memory node can be dynamically moved between cpusets.
  2514. *
  2515. * The change of semantics for shared hugetlb mapping with cpuset is
  2516. * undesirable. However, in order to preserve some of the semantics,
2517. * we fall back to checking against current free page availability as
2518. * a best-effort attempt, hopefully minimizing the impact of the changed
2519. * semantics that cpuset brings.
  2520. */
  2521. if (delta > 0) {
  2522. if (gather_surplus_pages(h, delta) < 0)
  2523. goto out;
  2524. if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
  2525. return_unused_surplus_pages(h, delta);
  2526. goto out;
  2527. }
  2528. }
  2529. ret = 0;
  2530. if (delta < 0)
  2531. return_unused_surplus_pages(h, (unsigned long) -delta);
  2532. out:
  2533. spin_unlock(&hugetlb_lock);
  2534. return ret;
  2535. }
  2536. static void hugetlb_vm_op_open(struct vm_area_struct *vma)
  2537. {
  2538. struct resv_map *resv = vma_resv_map(vma);
  2539. /*
2540. * This new VMA should share its sibling's reservation map if present.
  2541. * The VMA will only ever have a valid reservation map pointer where
  2542. * it is being copied for another still existing VMA. As that VMA
  2543. * has a reference to the reservation map it cannot disappear until
  2544. * after this open call completes. It is therefore safe to take a
  2545. * new reference here without additional locking.
  2546. */
  2547. if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
  2548. kref_get(&resv->refs);
  2549. }
  2550. static void hugetlb_vm_op_close(struct vm_area_struct *vma)
  2551. {
  2552. struct hstate *h = hstate_vma(vma);
  2553. struct resv_map *resv = vma_resv_map(vma);
  2554. struct hugepage_subpool *spool = subpool_vma(vma);
  2555. unsigned long reserve, start, end;
  2556. long gbl_reserve;
  2557. if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
  2558. return;
  2559. start = vma_hugecache_offset(h, vma, vma->vm_start);
  2560. end = vma_hugecache_offset(h, vma, vma->vm_end);
  2561. reserve = (end - start) - region_count(resv, start, end);
  2562. kref_put(&resv->refs, resv_map_release);
  2563. if (reserve) {
  2564. /*
  2565. * Decrement reserve counts. The global reserve count may be
  2566. * adjusted if the subpool has a minimum size.
  2567. */
  2568. gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
  2569. hugetlb_acct_memory(h, -gbl_reserve);
  2570. }
  2571. }
  2572. /*
  2573. * We cannot handle pagefaults against hugetlb pages at all. They cause
  2574. * handle_mm_fault() to try to instantiate regular-sized pages in the
2575. * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
  2576. * this far.
  2577. */
static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}

const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
};
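
/*
 * These vm_ops are installed on hugetlb VMAs (hugetlbfs points vma->vm_ops
 * at hugetlb_vm_ops when the file is mmapped). In practice the generic
 * fault path dispatches hugetlb VMAs to hugetlb_fault() below rather than
 * to .fault, which is why hugetlb_vm_op_fault() can simply BUG().
 */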

static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
					 vma->vm_page_prot)));
	} else {
		entry = huge_pte_wrprotect(mk_huge_pte(page,
					   vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);
	entry = arch_make_huge_pte(entry, vma, page, writable);

	return entry;
}
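
/*
 * The callers in this file choose 'writable' as follows: hugetlb_no_page()
 * passes (vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_SHARED), so a
 * private mapping is initially mapped read-only, and hugetlb_cow() later
 * calls make_huge_pte(vma, new_page, 1) once it owns a private copy.
 */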

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
		update_mmu_cache(vma, address, ptep);
}

static int is_hugetlb_entry_migration(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return 0;
	swp = pte_to_swp_entry(pte);
	if (non_swap_entry(swp) && is_migration_entry(swp))
		return 1;
	else
		return 0;
}

static int is_hugetlb_entry_hwpoisoned(pte_t pte)
{
	swp_entry_t swp;

	if (huge_pte_none(pte) || pte_present(pte))
		return 0;
	swp = pte_to_swp_entry(pte);
	if (non_swap_entry(swp) && is_hwpoison_entry(swp))
		return 1;
	else
		return 0;
}
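
/*
 * Both helpers above follow the same pattern: a huge pte that is neither
 * none nor present must hold a swap-format entry, so it is converted with
 * pte_to_swp_entry() and classified as a migration or hwpoison entry.
 * Any other non-present state simply reports 0.
 */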

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	int ret = 0;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	mmun_start = vma->vm_start;
	mmun_end = vma->vm_end;
	if (cow)
		mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);

	for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
		spinlock_t *src_ptl, *dst_ptl;
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr, sz);
		if (!dst_pte) {
			ret = -ENOMEM;
			break;
		}

		/* If the pagetables are shared don't copy or take references */
		if (dst_pte == src_pte)
			continue;

		dst_ptl = huge_pte_lock(h, dst, dst_pte);
		src_ptl = huge_pte_lockptr(h, src, src_pte);
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
		entry = huge_ptep_get(src_pte);
		if (huge_pte_none(entry)) { /* skip none entry */
			;
		} else if (unlikely(is_hugetlb_entry_migration(entry) ||
				    is_hugetlb_entry_hwpoisoned(entry))) {
			swp_entry_t swp_entry = pte_to_swp_entry(entry);

			if (is_write_migration_entry(swp_entry) && cow) {
				/*
				 * COW mappings require pages in both
				 * parent and child to be set to read.
				 */
				make_migration_entry_read(&swp_entry);
				entry = swp_entry_to_pte(swp_entry);
				set_huge_pte_at(src, addr, src_pte, entry);
			}
			set_huge_pte_at(dst, addr, dst_pte, entry);
		} else {
			if (cow) {
				huge_ptep_set_wrprotect(src, addr, src_pte);
				mmu_notifier_invalidate_range(src, mmun_start,
							      mmun_end);
			}
			entry = huge_ptep_get(src_pte);
			ptepage = pte_page(entry);
			get_page(ptepage);
			page_dup_rmap(ptepage, true);
			set_huge_pte_at(dst, addr, dst_pte, entry);
			hugetlb_count_add(pages_per_huge_page(h), dst);
		}
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
	}

	if (cow)
		mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);

	return ret;
}
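
/*
 * copy_hugetlb_page_range() is called from the fork path (copy_page_range())
 * for hugetlb VMAs. For private writable mappings (cow) it write-protects
 * the parent's ptes so that the first write in either process goes through
 * hugetlb_cow() below.
 */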

void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page)
{
	int force_flush = 0;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	spinlock_t *ptl;
	struct page *page;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	const unsigned long mmun_start = start;	/* For mmu_notifiers */
	const unsigned long mmun_end   = end;	/* For mmu_notifiers */

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	tlb_start_vma(tlb, vma);
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	address = start;
again:
	for (; address < end; address += sz) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep))
			goto unlock;

		pte = huge_ptep_get(ptep);
		if (huge_pte_none(pte))
			goto unlock;

		/*
		 * Migrating hugepage or HWPoisoned hugepage is already
		 * unmapped and its refcount is dropped, so just clear pte here.
		 */
		if (unlikely(!pte_present(pte))) {
			huge_pte_clear(mm, address, ptep);
			goto unlock;
		}

		page = pte_page(pte);
		/*
		 * If a reference page is supplied, it is because a specific
		 * page is being unmapped, not a range. Ensure the page we
		 * are about to unmap is the actual page of interest.
		 */
		if (ref_page) {
			if (page != ref_page)
				goto unlock;

			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		tlb_remove_tlb_entry(tlb, ptep, address);
		if (huge_pte_dirty(pte))
			set_page_dirty(page);

		hugetlb_count_sub(pages_per_huge_page(h), mm);
		page_remove_rmap(page, true);
		force_flush = !__tlb_remove_page(tlb, page);
		if (force_flush) {
			address += sz;
			spin_unlock(ptl);
			break;
		}
		/* Bail out after unmapping reference page if supplied */
		if (ref_page) {
			spin_unlock(ptl);
			break;
		}
unlock:
		spin_unlock(ptl);
	}
	/*
	 * If mmu_gather ran out of room to batch pages, we broke out of the
	 * PTE lock above to avoid doing the potentially expensive TLB
	 * invalidate and page-free while holding it.
	 */
	if (force_flush) {
		force_flush = 0;
		tlb_flush_mmu(tlb);
		if (address < end && !ref_page)
			goto again;
	}
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	tlb_end_vma(tlb, vma);
}

void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	__unmap_hugepage_range(tlb, vma, start, end, ref_page);

	/*
	 * Clear this flag so that x86's huge_pmd_share page_table_shareable
	 * test will fail on a vma being torn down, and not grab a page table
	 * on its way out. We're lucky that the flag has such an appropriate
	 * name, and can in fact be safely cleared here. We could clear it
	 * before the __unmap_hugepage_range above, but all that's necessary
	 * is to clear it before releasing the i_mmap_rwsem. This works
	 * because in the context this is called, the VMA is about to be
	 * destroyed and the i_mmap_rwsem is held.
	 */
	vma->vm_flags &= ~VM_MAYSHARE;
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct page *ref_page)
{
	struct mm_struct *mm;
	struct mmu_gather tlb;

	mm = vma->vm_mm;

	tlb_gather_mmu(&tlb, mm, start, end);
	__unmap_hugepage_range(&tlb, vma, start, end, ref_page);
	tlb_finish_mmu(&tlb, start, end);
}

/*
 * This is called when the original mapper is failing to COW a MAP_PRIVATE
 * mapping it owns the reserve page for. The intention is to unmap the page
 * from other VMAs and let the children be SIGKILLed if they are faulting the
 * same region.
 */
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
			      struct page *page, unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(h);
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	mapping = file_inode(vma->vm_file)->i_mapping;

	/*
	 * Take the mapping lock for the duration of the table walk. As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held
	 */
	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Shared VMAs have their own reserves and do not affect
		 * MAP_PRIVATE accounting but it is possible that a shared
		 * VMA is using the same page so check and skip such VMAs.
		 */
		if (iter_vma->vm_flags & VM_MAYSHARE)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			unmap_hugepage_range(iter_vma, address,
					     address + huge_page_size(h), page);
	}
	i_mmap_unlock_write(mapping);
}
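
/*
 * unmap_ref_private() is only invoked from hugetlb_cow() below, in the
 * 'outside_reserve' failure path: when the reserve owner cannot allocate a
 * page for its COW, the mappings held by non-owning private VMAs (children)
 * are unmapped so the owner can retry with the original page.
 */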

/*
 * Hugetlb_cow() should be called with page lock of the original hugepage held.
 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
 * cannot race with other handlers or page migration.
 * Keep the pte_same checks anyway to make transition from the mutex easier.
 */
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte,
			struct page *pagecache_page, spinlock_t *ptl)
{
	struct hstate *h = hstate_vma(vma);
	struct page *old_page, *new_page;
	int ret = 0, outside_reserve = 0;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	old_page = pte_page(pte);

retry_avoidcopy:
	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
		page_move_anon_rmap(old_page, vma, address);
		set_huge_ptep_writable(vma, address, ptep);
		return 0;
	}

	/*
	 * If the process that created a MAP_PRIVATE mapping is about to
	 * perform a COW due to a shared page count, attempt to satisfy
	 * the allocation without using the existing reserves. The pagecache
	 * page is used to determine if the reserve at this address was
	 * consumed or not. If reserves were used, a partial faulted mapping
	 * at the time of fork() could consume its reserves on COW instead
	 * of the full address range.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
			old_page != pagecache_page)
		outside_reserve = 1;

	get_page(old_page);

	/*
	 * Drop page table lock as buddy allocator may be called. It will
	 * be acquired again before returning to the caller, as expected.
	 */
	spin_unlock(ptl);
	new_page = alloc_huge_page(vma, address, outside_reserve);

	if (IS_ERR(new_page)) {
		/*
		 * If a process owning a MAP_PRIVATE mapping fails to COW,
		 * it is due to references held by a child and an insufficient
		 * huge page pool. To guarantee the original mapper's
		 * reliability, unmap the page from child processes. The child
		 * may get SIGKILLed if it later faults.
		 */
		if (outside_reserve) {
			put_page(old_page);
			BUG_ON(huge_pte_none(pte));
			unmap_ref_private(mm, vma, old_page, address);
			BUG_ON(huge_pte_none(pte));
			spin_lock(ptl);
			ptep = huge_pte_offset(mm, address & huge_page_mask(h));
			if (likely(ptep &&
				   pte_same(huge_ptep_get(ptep), pte)))
				goto retry_avoidcopy;
			/*
			 * race occurs while re-acquiring page table
			 * lock, and our job is done.
			 */
			return 0;
		}

		ret = (PTR_ERR(new_page) == -ENOMEM) ?
			VM_FAULT_OOM : VM_FAULT_SIGBUS;
		goto out_release_old;
	}

	/*
	 * When the original hugepage is a shared one, it does not have
	 * an anon_vma prepared.
	 */
	if (unlikely(anon_vma_prepare(vma))) {
		ret = VM_FAULT_OOM;
		goto out_release_all;
	}

	copy_user_huge_page(new_page, old_page, address, vma,
			    pages_per_huge_page(h));
	__SetPageUptodate(new_page);
	set_page_huge_active(new_page);

	mmun_start = address & huge_page_mask(h);
	mmun_end = mmun_start + huge_page_size(h);
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	/*
	 * Retake the page table lock to check for racing updates
	 * before the page tables are altered
	 */
	spin_lock(ptl);
	ptep = huge_pte_offset(mm, address & huge_page_mask(h));
	if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
		ClearPagePrivate(new_page);

		/* Break COW */
		huge_ptep_clear_flush(vma, address, ptep);
		mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		page_remove_rmap(old_page, true);
		hugepage_add_new_anon_rmap(new_page, vma, address);
		/* Make the old page be freed below */
		new_page = old_page;
	}
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out_release_all:
	put_page(new_page);
out_release_old:
	put_page(old_page);

	spin_lock(ptl); /* Caller expects lock to be held */
	return ret;
}
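
/*
 * hugetlb_cow() has two callers in this file: hugetlb_no_page() uses it to
 * break COW during the initial fault ("do the COW without a second fault"),
 * and hugetlb_fault() calls it for a write fault on a read-only pte. Both
 * callers hold the per-page fault mutex and the page table lock.
 */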

/* Return the pagecache page at a given address within a VMA */
static struct page *hugetlbfs_pagecache_page(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	return find_lock_page(mapping, idx);
}

/*
 * Return whether there is a pagecache page to back the given address within
 * the VMA. The caller, follow_hugetlb_page(), holds the page table lock, so
 * we cannot lock_page() here.
 */
static bool hugetlbfs_pagecache_present(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address)
{
	struct address_space *mapping;
	pgoff_t idx;
	struct page *page;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	page = find_get_page(mapping, idx);
	if (page)
		put_page(page);
	return page != NULL;
}

int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			   pgoff_t idx)
{
	struct inode *inode = mapping->host;
	struct hstate *h = hstate_inode(inode);
	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);

	if (err)
		return err;
	ClearPagePrivate(page);

	spin_lock(&inode->i_lock);
	inode->i_blocks += blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);
	return 0;
}
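
/*
 * The i_blocks increment above is balanced by hugetlb_unreserve_pages()
 * further down, which subtracts blocks_per_huge_page(h) for every huge
 * page freed when pages are removed from the file.
 */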

static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			   struct address_space *mapping, pgoff_t idx,
			   unsigned long address, pte_t *ptep, unsigned int flags)
{
	struct hstate *h = hstate_vma(vma);
	int ret = VM_FAULT_SIGBUS;
	int anon_rmap = 0;
	unsigned long size;
	struct page *page;
	pte_t new_pte;
	spinlock_t *ptl;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW. Warn that such a situation has occurred as it may not be obvious.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
			   current->pid);
		return ret;
	}

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (idx >= size)
			goto out;
		page = alloc_huge_page(vma, address, 0);
		if (IS_ERR(page)) {
			ret = PTR_ERR(page);
			if (ret == -ENOMEM)
				ret = VM_FAULT_OOM;
			else
				ret = VM_FAULT_SIGBUS;
			goto out;
		}
		clear_huge_page(page, address, pages_per_huge_page(h));
		__SetPageUptodate(page);
		set_page_huge_active(page);

		if (vma->vm_flags & VM_MAYSHARE) {
			int err = huge_add_to_page_cache(page, mapping, idx);
			if (err) {
				put_page(page);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else {
			lock_page(page);
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto backout_unlocked;
			}
			anon_rmap = 1;
		}
	} else {
		/*
		 * If a memory error occurs between mmap() and fault, some
		 * processes don't have a hwpoisoned swap entry for the errored
		 * virtual address, so we need to block hugepage faults with a
		 * PG_hwpoison bit check.
		 */
		if (unlikely(PageHWPoison(page))) {
			ret = VM_FAULT_HWPOISON |
				VM_FAULT_SET_HINDEX(hstate_index(h));
			goto backout_unlocked;
		}
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, address);
	}

	ptl = huge_pte_lockptr(h, mm, ptep);
	spin_lock(ptl);
	size = i_size_read(mapping->host) >> huge_page_shift(h);
	if (idx >= size)
		goto backout;

	ret = 0;
	if (!huge_pte_none(huge_ptep_get(ptep)))
		goto backout;

	if (anon_rmap) {
		ClearPagePrivate(page);
		hugepage_add_new_anon_rmap(page, vma, address);
	} else
		page_dup_rmap(page, true);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	hugetlb_count_add(pages_per_huge_page(h), mm);
	if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
	}

	spin_unlock(ptl);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(ptl);
backout_unlocked:
	unlock_page(page);
	put_page(page);
	goto out;
}

#ifdef CONFIG_SMP
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
			    struct vm_area_struct *vma,
			    struct address_space *mapping,
			    pgoff_t idx, unsigned long address)
{
	unsigned long key[2];
	u32 hash;

	if (vma->vm_flags & VM_SHARED) {
		key[0] = (unsigned long) mapping;
		key[1] = idx;
	} else {
		key[0] = (unsigned long) mm;
		key[1] = address >> huge_page_shift(h);
	}

	hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);

	return hash & (num_fault_mutexes - 1);
}
#else
/*
 * For uniprocessor systems we always use a single mutex, so just
 * return 0 and avoid the hashing overhead.
 */
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
			    struct vm_area_struct *vma,
			    struct address_space *mapping,
			    pgoff_t idx, unsigned long address)
{
	return 0;
}
#endif
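
/*
 * Typical usage, as in hugetlb_fault() below:
 *
 *	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 *
 * Faults on the same page of a shared mapping (same mapping + idx) hash to
 * the same mutex, serializing allocation and instantiation of that page.
 */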

int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{
	pte_t *ptep, entry;
	spinlock_t *ptl;
	int ret;
	u32 hash;
	pgoff_t idx;
	struct page *page = NULL;
	struct page *pagecache_page = NULL;
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping;
	int need_wait_lock = 0;

	address &= huge_page_mask(h);

	ptep = huge_pte_offset(mm, address);
	if (ptep) {
		entry = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_migration(entry))) {
			migration_entry_wait_huge(vma, mm, ptep);
			return 0;
		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
			return VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
	} else {
		ptep = huge_pte_alloc(mm, address, huge_page_size(h));
		if (!ptep)
			return VM_FAULT_OOM;
	}

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
		goto out_mutex;
	}

	ret = 0;

	/*
	 * entry could be a migration/hwpoison entry at this point, so this
	 * check prevents the kernel from going below assuming that we have
	 * an active hugepage in pagecache. This goto expects the 2nd page
	 * fault, and the is_hugetlb_entry_(migration|hwpoisoned) checks will
	 * properly handle it.
	 */
	if (!pte_present(entry))
		goto out_mutex;

	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. For private mappings, we also lookup the pagecache
	 * page now as it is used to determine if a reservation has been
	 * consumed.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, address);

		if (!(vma->vm_flags & VM_MAYSHARE))
			pagecache_page = hugetlbfs_pagecache_page(h,
								vma, address);
	}

	ptl = huge_pte_lock(h, mm, ptep);

	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_ptl;

	/*
	 * hugetlb_cow() requires page locks of pte_page(entry) and
	 * pagecache_page, so here we need to take the former one
	 * when page != pagecache_page or !pagecache_page.
	 */
	page = pte_page(entry);
	if (page != pagecache_page)
		if (!trylock_page(page)) {
			need_wait_lock = 1;
			goto out_ptl;
		}

	get_page(page);

	if (flags & FAULT_FLAG_WRITE) {
		if (!huge_pte_write(entry)) {
			ret = hugetlb_cow(mm, vma, address, ptep, entry,
					pagecache_page, ptl);
			goto out_put_page;
		}
		entry = huge_pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, ptep);
out_put_page:
	if (page != pagecache_page)
		unlock_page(page);
	put_page(page);
out_ptl:
	spin_unlock(ptl);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}
out_mutex:
	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	/*
	 * Generally it's safe to hold a refcount while waiting for the page
	 * lock. Here, however, we only wait in order to defer the next page
	 * fault and avoid a busy loop, and the page is not touched after it
	 * is unlocked before we return from the current page fault. So we
	 * are safe from accessing a freed page even though we wait here
	 * without taking a refcount.
	 */
	if (need_wait_lock)
		wait_on_page_locked(page);
	return ret;
}

long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page **pages, struct vm_area_struct **vmas,
			 unsigned long *position, unsigned long *nr_pages,
			 long i, unsigned int flags)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	unsigned long remainder = *nr_pages;
	struct hstate *h = hstate_vma(vma);

	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		spinlock_t *ptl = NULL;
		int absent;
		struct page *page;

		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current))) {
			remainder = 0;
			break;
		}

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage. We have to make sure we get the
		 * first, for the page indexing below to work.
		 *
		 * Note that page table lock is not held when pte is null.
		 */
		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
		if (pte)
			ptl = huge_pte_lock(h, mm, pte);
		absent = !pte || huge_pte_none(huge_ptep_get(pte));

		/*
		 * When coredumping, it suits get_dump_page if we just return
		 * an error where there's an empty slot with no huge pagecache
		 * to back it. This way, we avoid allocating a hugepage, and
		 * the sparse dumpfile avoids allocating disk blocks, but its
		 * huge holes still show up with zeroes where they need to be.
		 */
		if (absent && (flags & FOLL_DUMP) &&
		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
			if (pte)
				spin_unlock(ptl);
			remainder = 0;
			break;
		}

		/*
		 * We need to call hugetlb_fault() both for hugepages under
		 * migration (in which case hugetlb_fault() waits for the
		 * migration) and for hwpoisoned hugepages (in which case we
		 * need to prevent the caller from accessing them). To do
		 * this we use is_swap_pte() here instead of
		 * is_hugetlb_entry_migration() and
		 * is_hugetlb_entry_hwpoisoned(), because it simply covers
		 * both cases, and because we can't follow correct pages
		 * directly from any kind of swap entry.
		 */
		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
		    ((flags & FOLL_WRITE) &&
		      !huge_pte_write(huge_ptep_get(pte)))) {
			int ret;

			if (pte)
				spin_unlock(ptl);
			ret = hugetlb_fault(mm, vma, vaddr,
				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			break;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));
same_page:
		if (pages) {
			pages[i] = mem_map_offset(page, pfn_offset);
			get_page(pages[i]);
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < pages_per_huge_page(h)) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
		spin_unlock(ptl);
	}
	*nr_pages = remainder;
	*position = vaddr;

	return i ? i : -EFAULT;
}
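
/*
 * follow_hugetlb_page() is the hugetlb leg of get_user_pages(): it pins up
 * to *nr_pages pages starting at *position, faulting them in through
 * hugetlb_fault() when necessary, and advances *position and *nr_pages so
 * the caller can resume where it left off.
 */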

unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	unsigned long pages = 0;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	mmu_notifier_invalidate_range_start(mm, start, end);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	for (; address < end; address += huge_page_size(h)) {
		spinlock_t *ptl;
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep)) {
			pages++;
			spin_unlock(ptl);
			continue;
		}
		pte = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
			spin_unlock(ptl);
			continue;
		}
		if (unlikely(is_hugetlb_entry_migration(pte))) {
			swp_entry_t entry = pte_to_swp_entry(pte);

			if (is_write_migration_entry(entry)) {
				pte_t newpte;

				make_migration_entry_read(&entry);
				newpte = swp_entry_to_pte(entry);
				set_huge_pte_at(mm, address, ptep, newpte);
				pages++;
			}
			spin_unlock(ptl);
			continue;
		}
		if (!huge_pte_none(pte)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
			pte = arch_make_huge_pte(pte, vma, NULL, 0);
			set_huge_pte_at(mm, address, ptep, pte);
			pages++;
		}
		spin_unlock(ptl);
	}
	/*
	 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_rwsem, another task can do the final put_page
	 * and that page table be reused and filled with junk.
	 */
	flush_tlb_range(vma, start, end);
	mmu_notifier_invalidate_range(mm, start, end);
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages << h->order;
}
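
/*
 * The return value converts the number of huge ptes touched into base pages
 * (pages << h->order), so mprotect()-style accounting can treat hugetlb
 * VMAs the same way as normal mappings.
 */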

int hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
	long ret, chg;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct resv_map *resv_map;
	long gbl_reserve;

	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves
	 */
	if (vm_flags & VM_NORESERVE)
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		resv_map = inode_resv_map(inode);

		chg = region_chg(resv_map, from, to);
	} else {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return -ENOMEM;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0) {
		ret = chg;
		goto out_err;
	}

	/*
	 * There must be enough pages in the subpool for the mapping. If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */
	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
	if (gbl_reserve < 0) {
		ret = -ENOSPC;
		goto out_err;
	}

	/*
	 * Check enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not
	 */
	ret = hugetlb_acct_memory(h, gbl_reserve);
	if (ret < 0) {
		/* put back original number of pages, chg */
		(void)hugepage_subpool_put_pages(spool, chg);
		goto out_err;
	}

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		long add = region_add(resv_map, from, to);

		if (unlikely(chg > add)) {
			/*
			 * pages in this range were added to the reserve
			 * map between region_chg and region_add. This
			 * indicates a race with alloc_huge_page. Adjust
			 * the subpool and reserve counts modified above
			 * based on the difference.
			 */
			long rsv_adjust;

			rsv_adjust = hugepage_subpool_put_pages(spool,
								chg - add);
			hugetlb_acct_memory(h, -rsv_adjust);
		}
	}
	return 0;
out_err:
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		region_abort(resv_map, from, to);
	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		kref_put(&resv_map->refs, resv_map_release);
	return ret;
}
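
/*
 * from/to above are offsets into the file in huge-page units; hugetlbfs
 * typically calls this when a mapping or file region is set up, and it
 * pairs with hugetlb_unreserve_pages() below when the region is removed
 * again.
 */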

long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
								long freed)
{
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);
	long gbl_reserve;

	if (resv_map) {
		chg = region_del(resv_map, start, end);
		/*
		 * region_del() can fail in the rare case where a region
		 * must be split and another region descriptor can not be
		 * allocated. If end == LONG_MAX, it will not fail.
		 */
		if (chg < 0)
			return chg;
	}

	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	/*
	 * If the subpool has a minimum size, the number of global
	 * reservations to be released may be adjusted.
	 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);

	return 0;
}

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
	unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;

	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}
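
/*
 * saddr above is the address in svma that maps the same file offset (idx)
 * as addr does in vma. Sharing is only possible when both VMAs cover the
 * whole PUD-sized range around that address with identical flags, so that
 * one pmd page can serve both mappings.
 */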

static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return true;
	return false;
}

/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;
	spinlock_t *ptl;

	if (!vma_shareable(vma, addr))
		return (pte_t *)pmd_alloc(mm, pud, addr);

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				mm_inc_nr_pmds(mm);
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
	spin_lock(ptl);
	if (pud_none(*pud)) {
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
	} else {
		put_page(virt_to_page(spte));
		mm_inc_nr_pmds(mm);
	}
	spin_unlock(ptl);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	i_mmap_unlock_write(mapping);
	return pte;
}

/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
 * indicated by page_count > 1, unmap is achieved by clearing pud and
 * decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * called with page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	mm_dec_nr_pmds(mm);
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}
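
/*
 * The *addr rewrite above serves the callers' loops (see
 * __unmap_hugepage_range() and hugetlb_change_protection()): once the pud
 * covering this range is cleared, *addr is set to one huge page before the
 * end of the PUD-sized region, so the caller's 'addr += sz' step resumes at
 * the next region instead of re-walking ptes that were just unshared.
 */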

#define want_pmd_share()	(1)
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	return NULL;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}
#define want_pmd_share()	(0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share() && pud_none(*pud))
				pte = huge_pmd_share(mm, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud)) {
			if (pud_huge(*pud))
				return (pte_t *)pud;
			pmd = pmd_offset(pud, addr);
		}
	}
	return (pte_t *) pmd;
}
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
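
/*
 * In this generic implementation a "huge pte" is really a pmd entry for
 * PMD_SIZE pages and a pud entry for PUD_SIZE (gigantic) pages; both
 * huge_pte_alloc() and huge_pte_offset() hand the higher-level entry back
 * cast to pte_t * so the rest of this file can treat them uniformly.
 */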

/*
 * These functions are overwritable if your architecture needs its own
 * behavior.
 */
struct page * __weak
follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int flags)
{
	struct page *page = NULL;
	spinlock_t *ptl;
retry:
	ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	/*
	 * make sure that the address range covered by this pmd is not
	 * unmapped from other threads.
	 */
	if (!pmd_huge(*pmd))
		goto out;
	if (pmd_present(*pmd)) {
		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
			goto retry;
		}
		/*
		 * hwpoisoned entry is treated as no_page_table in
		 * follow_page_mask().
		 */
	}
out:
	spin_unlock(ptl);
	return page;
}

struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

#ifdef CONFIG_MEMORY_FAILURE

/*
 * This function is called from memory failure code.
 * Assume the caller holds page lock of the head page.
 */
int dequeue_hwpoisoned_huge_page(struct page *hpage)
{
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);
	int ret = -EBUSY;

	spin_lock(&hugetlb_lock);
	/*
	 * Just checking !page_huge_active is not enough, because that could be
	 * an isolated/hwpoisoned hugepage (which have >0 refcount).
	 */
	if (!page_huge_active(hpage) && !page_count(hpage)) {
		/*
		 * Hwpoisoned hugepage isn't linked to activelist or freelist,
		 * but dangling hpage->lru can trigger list-debug warnings
		 * (this happens when we call unpoison_memory() on it),
		 * so let it point to itself with list_del_init().
		 */
		list_del_init(&hpage->lru);
		set_page_refcounted(hpage);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}
#endif

bool isolate_huge_page(struct page *page, struct list_head *list)
{
	bool ret = true;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	spin_lock(&hugetlb_lock);
	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
		ret = false;
		goto unlock;
	}
	clear_page_huge_active(page);
	list_move_tail(&page->lru, list);
unlock:
	spin_unlock(&hugetlb_lock);
	return ret;
}

void putback_active_hugepage(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	spin_lock(&hugetlb_lock);
	set_page_huge_active(page);
	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	put_page(page);
}