hugetlb.c

/*
 * Generic hugetlb support.
 * (C) Nadia Yvette Chambers, April 2004
 */
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/mmu_notifier.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/rmap.h>
#include <linux/string_helpers.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/jhash.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

#include <linux/io.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/node.h>
#include <linux/userfaultfd_k.h>
#include "internal.h"

int hugetlb_max_hstate __read_mostly;
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];
/*
 * Minimum page order among possible hugepage sizes, set to a proper value
 * at boot time.
 */
static unsigned int minimum_order __read_mostly = UINT_MAX;

__initdata LIST_HEAD(huge_boot_pages);

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static unsigned long __initdata default_hstate_size;
static bool __initdata parsed_valid_hugepagesz = true;

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes;
struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);

static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
{
        bool free = (spool->count == 0) && (spool->used_hpages == 0);

        spin_unlock(&spool->lock);

        /* If no pages are used, and no other handles to the subpool
         * remain, give up any reservations based on minimum size and
         * free the subpool */
        if (free) {
                if (spool->min_hpages != -1)
                        hugetlb_acct_memory(spool->hstate,
                                                -spool->min_hpages);
                kfree(spool);
        }
}

struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
                                                long min_hpages)
{
        struct hugepage_subpool *spool;

        spool = kzalloc(sizeof(*spool), GFP_KERNEL);
        if (!spool)
                return NULL;

        spin_lock_init(&spool->lock);
        spool->count = 1;
        spool->max_hpages = max_hpages;
        spool->hstate = h;
        spool->min_hpages = min_hpages;

        if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
                kfree(spool);
                return NULL;
        }
        spool->rsv_hpages = min_hpages;

        return spool;
}

void hugepage_put_subpool(struct hugepage_subpool *spool)
{
        spin_lock(&spool->lock);
        BUG_ON(!spool->count);
        spool->count--;
        unlock_or_release_subpool(spool);
}

/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
                                       long delta)
{
        long ret = delta;

        if (!spool)
                return ret;

        spin_lock(&spool->lock);

        if (spool->max_hpages != -1) {          /* maximum size accounting */
                if ((spool->used_hpages + delta) <= spool->max_hpages)
                        spool->used_hpages += delta;
                else {
                        ret = -ENOMEM;
                        goto unlock_ret;
                }
        }

        /* minimum size accounting */
        if (spool->min_hpages != -1 && spool->rsv_hpages) {
                if (delta > spool->rsv_hpages) {
                        /*
                         * Asking for more reserves than those already taken on
                         * behalf of subpool.  Return difference.
                         */
                        ret = delta - spool->rsv_hpages;
                        spool->rsv_hpages = 0;
                } else {
                        ret = 0;        /* reserves already accounted for */
                        spool->rsv_hpages -= delta;
                }
        }

unlock_ret:
        spin_unlock(&spool->lock);
        return ret;
}
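
/*
 * Worked example (illustrative only, assuming a subpool mounted with
 * min_hpages = 10 and no maximum, so rsv_hpages starts at 10):
 *
 *	hugepage_subpool_get_pages(spool, 3)
 *		delta (3) <= rsv_hpages (10): the request is fully
 *		covered by the pre-accounted minimum reservation, so
 *		the function returns 0 and rsv_hpages drops to 7.
 *
 *	hugepage_subpool_get_pages(spool, 12)
 *		delta (12) > rsv_hpages (7): only 7 pages are covered,
 *		so the function returns 12 - 7 = 5, the number of pages
 *		the global pool must provide, and rsv_hpages becomes 0.
 */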

/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
                                       long delta)
{
        long ret = delta;

        if (!spool)
                return delta;

        spin_lock(&spool->lock);

        if (spool->max_hpages != -1)            /* maximum size accounting */
                spool->used_hpages -= delta;

        /* minimum size accounting */
        if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
                if (spool->rsv_hpages + delta <= spool->min_hpages)
                        ret = 0;
                else
                        ret = spool->rsv_hpages + delta - spool->min_hpages;

                spool->rsv_hpages += delta;
                if (spool->rsv_hpages > spool->min_hpages)
                        spool->rsv_hpages = spool->min_hpages;
        }

        /*
         * If hugetlbfs_put_super couldn't free spool due to an outstanding
         * quota reference, free it now.
         */
        unlock_or_release_subpool(spool);

        return ret;
}
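
/*
 * Worked example (illustrative only, continuing the sketch above with
 * min_hpages = 10 and rsv_hpages now 0):
 *
 *	hugepage_subpool_put_pages(spool, 4)
 *		rsv_hpages (0) + delta (4) <= min_hpages (10), so all
 *		4 freed pages are retained to rebuild the minimum
 *		reservation: the function returns 0 and rsv_hpages
 *		becomes 4.
 *
 * Only the part that would push rsv_hpages past min_hpages is returned
 * to the caller as global reservations to drop.
 */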

static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
{
        return HUGETLBFS_SB(inode->i_sb)->spool;
}

static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
{
        return subpool_inode(file_inode(vma->vm_file));
}

/*
 * Region tracking -- allows tracking of reservations and instantiated pages
 *                    across the pages in a mapping.
 *
 * The region data structures are embedded into a resv_map and protected
 * by a resv_map's lock.  The set of regions within the resv_map represent
 * reservations for huge pages, or huge pages that have already been
 * instantiated within the map.  The from and to elements are huge page
 * indices into the associated mapping.  from indicates the starting index
 * of the region.  to represents the first index past the end of the region.
 *
 * For example, a file region structure with from == 0 and to == 4 represents
 * four huge pages in a mapping.  It is important to note that the to element
 * represents the first element past the end of the region.  This is used in
 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
 *
 * Interval notation of the form [from, to) will be used to indicate that
 * the endpoint from is inclusive and to is exclusive.
 */
struct file_region {
        struct list_head link;
        long from;
        long to;
};

/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  In the normal case, existing regions will be expanded
 * to accommodate the specified range.  Sufficient regions should
 * exist for expansion due to the previous call to region_chg
 * with the same range.  However, it is possible that region_del
 * could have been called after region_chg and modified the map
 * in such a way that no region exists to be expanded.  In this
 * case, pull a region descriptor from the cache associated with
 * the map and use that for the new range.
 *
 * Return the number of new huge pages added to the map.  This
 * number is greater than or equal to zero.
 */
static long region_add(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *nrg, *trg;
        long add = 0;

        spin_lock(&resv->lock);
        /* Locate the region we are either in or before. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /*
         * If no region exists which can be expanded to include the
         * specified range, the list must have been modified by an
         * interleaving call to region_del().  Pull a region descriptor
         * from the cache and use it for this range.
         */
        if (&rg->link == head || t < rg->from) {
                VM_BUG_ON(resv->region_cache_count <= 0);

                resv->region_cache_count--;
                nrg = list_first_entry(&resv->region_cache, struct file_region,
                                        link);
                list_del(&nrg->link);

                nrg->from = f;
                nrg->to = t;
                list_add(&nrg->link, rg->link.prev);

                add += t - f;
                goto out_locked;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;

        /* Check for and consume any regions we now overlap with. */
        nrg = rg;
        list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        break;

                /* If this area reaches higher then extend our area to
                 * include it completely.  If this is not the first area
                 * which we intend to reuse, free it. */
                if (rg->to > t)
                        t = rg->to;
                if (rg != nrg) {
                        /* Decrement return value by the deleted range.
                         * Another range will span this area so that by
                         * end of routine add will be >= zero
                         */
                        add -= (rg->to - rg->from);
                        list_del(&rg->link);
                        kfree(rg);
                }
        }

        add += (nrg->from - f);         /* Added to beginning of region */
        nrg->from = f;
        add += t - nrg->to;             /* Added to end of region */
        nrg->to = t;

out_locked:
        resv->adds_in_progress--;
        spin_unlock(&resv->lock);
        VM_BUG_ON(add < 0);
        return add;
}
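
/*
 * Worked example (illustrative only): starting from an empty reserve
 * map, a region_chg(resv, 0, 4)/region_add(resv, 0, 4) pair creates
 * the region [0, 4) and both calls report 4 pages.  A later
 * region_chg/region_add of [2, 6) expands [0, 4) to [0, 6), with
 * region_add() returning 2 because only indices 4 and 5 are newly
 * represented.
 */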

/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  However, if the existing regions in the map cannot
 * be expanded to represent the new range, a new file_region
 * structure is added to the map as a placeholder.  This is
 * so that the subsequent region_add call will have all the
 * regions it needs and will not fail.
 *
 * Upon entry, region_chg will also examine the cache of region descriptors
 * associated with the map.  If there are not enough descriptors cached, one
 * will be allocated for the in progress add operation.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater than or
 * equal to zero.  -ENOMEM is returned if a new file_region structure or
 * cache entry is needed and cannot be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *nrg = NULL;
        long chg = 0;

retry:
        spin_lock(&resv->lock);
retry_locked:
        resv->adds_in_progress++;

        /*
         * Check for sufficient descriptors in the cache to accommodate
         * the number of in progress add operations.
         */
        if (resv->adds_in_progress > resv->region_cache_count) {
                struct file_region *trg;

                VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
                /* Must drop lock to allocate a new descriptor. */
                resv->adds_in_progress--;
                spin_unlock(&resv->lock);

                trg = kmalloc(sizeof(*trg), GFP_KERNEL);
                if (!trg) {
                        kfree(nrg);
                        return -ENOMEM;
                }

                spin_lock(&resv->lock);
                list_add(&trg->link, &resv->region_cache);
                resv->region_cache_count++;
                goto retry_locked;
        }

        /* Locate the region we are before or in. */
        list_for_each_entry(rg, head, link)
                if (f <= rg->to)
                        break;

        /* If we are below the current region then a new region is required.
         * Subtle, allocate a new region at the position but make it zero
         * size such that we can guarantee to record the reservation. */
        if (&rg->link == head || t < rg->from) {
                if (!nrg) {
                        resv->adds_in_progress--;
                        spin_unlock(&resv->lock);
                        nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                        if (!nrg)
                                return -ENOMEM;

                        nrg->from = f;
                        nrg->to = f;
                        INIT_LIST_HEAD(&nrg->link);
                        goto retry;
                }

                list_add(&nrg->link, rg->link.prev);
                chg = t - f;
                goto out_nrg;
        }

        /* Round our left edge to the current segment if it encloses us. */
        if (f > rg->from)
                f = rg->from;
        chg = t - f;

        /* Check for and consume any regions we now overlap with. */
        list_for_each_entry(rg, rg->link.prev, link) {
                if (&rg->link == head)
                        break;
                if (rg->from > t)
                        goto out;

                /* We overlap with this area, if it extends further than
                 * us then we must extend ourselves.  Account for its
                 * existing reservation. */
                if (rg->to > t) {
                        chg += rg->to - t;
                        t = rg->to;
                }
                chg -= rg->to - rg->from;
        }

out:
        spin_unlock(&resv->lock);
        /* We already know we raced and no longer need the new region */
        kfree(nrg);
        return chg;
out_nrg:
        spin_unlock(&resv->lock);
        return chg;
}

/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t)
{
        spin_lock(&resv->lock);
        VM_BUG_ON(!resv->region_cache_count);
        resv->adds_in_progress--;
        spin_unlock(&resv->lock);
}
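
/*
 * Illustrative calling pattern for the three routines above (a sketch
 * only; the names "idx" and "failed" are placeholders, not code from
 * this file):
 *
 *	chg = region_chg(resv, idx, idx + 1);
 *	if (chg < 0)
 *		return -ENOMEM;
 *	... attempt to allocate/instantiate the page ...
 *	if (failed)
 *		region_abort(resv, idx, idx + 1);
 *	else
 *		region_add(resv, idx, idx + 1);
 *
 * region_chg() pre-allocates any descriptor region_add() might need,
 * which is why region_add() itself cannot fail.
 */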

/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and thus never return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg, *trg;
        struct file_region *nrg = NULL;
        long del = 0;

retry:
        spin_lock(&resv->lock);
        list_for_each_entry_safe(rg, trg, head, link) {
                /*
                 * Skip regions before the range to be deleted.  file_region
                 * ranges are normally of the form [from, to).  However, there
                 * may be a "placeholder" entry in the map which is of the form
                 * (from, to) with from == to.  Check for placeholder entries
                 * at the beginning of the range to be deleted.
                 */
                if (rg->to <= f && (rg->to != rg->from || rg->to != f))
                        continue;

                if (rg->from >= t)
                        break;

                if (f > rg->from && t < rg->to) { /* Must split region */
                        /*
                         * Check for an entry in the cache before dropping
                         * lock and attempting allocation.
                         */
                        if (!nrg &&
                            resv->region_cache_count > resv->adds_in_progress) {
                                nrg = list_first_entry(&resv->region_cache,
                                                        struct file_region,
                                                        link);
                                list_del(&nrg->link);
                                resv->region_cache_count--;
                        }

                        if (!nrg) {
                                spin_unlock(&resv->lock);
                                nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
                                if (!nrg)
                                        return -ENOMEM;
                                goto retry;
                        }

                        del += t - f;

                        /* New entry for end of split region */
                        nrg->from = t;
                        nrg->to = rg->to;
                        INIT_LIST_HEAD(&nrg->link);

                        /* Original entry is trimmed */
                        rg->to = f;

                        list_add(&nrg->link, &rg->link);
                        nrg = NULL;
                        break;
                }

                if (f <= rg->from && t >= rg->to) { /* Remove entire region */
                        del += rg->to - rg->from;
                        list_del(&rg->link);
                        kfree(rg);
                        continue;
                }

                if (f <= rg->from) {    /* Trim beginning of region */
                        del += t - rg->from;
                        rg->from = t;
                } else {                /* Trim end of region */
                        del += rg->to - f;
                        rg->to = f;
                }
        }

        spin_unlock(&resv->lock);
        kfree(nrg);
        return del;
}
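
/*
 * Worked example (illustrative only): with a single region [0, 8) in
 * the map, region_del(resv, 2, 4) must split it, leaving [0, 2) and
 * [4, 8) and returning 2; the second half needs a fresh descriptor,
 * which is why only the split case can return -ENOMEM.  A subsequent
 * region_del(resv, 0, LONG_MAX) removes both remaining regions
 * outright and returns 2 + 4 = 6.
 */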

/*
 * A rare out of memory error was encountered which prevented removal of
 * the reserve map region for a page.  The huge page itself was freed
 * and removed from the page cache.  This routine will adjust the subpool
 * usage count, and the global reserve count if needed.  By incrementing
 * these counts, the reserve map entry which could not be deleted will
 * appear as a "reserved" entry instead of simply dangling with incorrect
 * counts.
 */
void hugetlb_fix_reserve_counts(struct inode *inode)
{
        struct hugepage_subpool *spool = subpool_inode(inode);
        long rsv_adjust;

        rsv_adjust = hugepage_subpool_get_pages(spool, 1);
        if (rsv_adjust) {
                struct hstate *h = hstate_inode(inode);

                hugetlb_acct_memory(h, 1);
        }
}

/*
 * Count and return the number of huge pages in the reserve map
 * that intersect with the range [f, t).
 */
static long region_count(struct resv_map *resv, long f, long t)
{
        struct list_head *head = &resv->regions;
        struct file_region *rg;
        long chg = 0;

        spin_lock(&resv->lock);
        /* Locate each segment we overlap with, and count that overlap. */
        list_for_each_entry(rg, head, link) {
                long seg_from;
                long seg_to;

                if (rg->to <= f)
                        continue;
                if (rg->from >= t)
                        break;

                seg_from = max(rg->from, f);
                seg_to = min(rg->to, t);

                chg += seg_to - seg_from;
        }
        spin_unlock(&resv->lock);

        return chg;
}
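
/*
 * Worked example (illustrative only): with regions [0, 2) and [5, 8)
 * in the map, region_count(resv, 1, 6) sums the overlaps
 * [max(0, 1), min(2, 6)) and [max(5, 1), min(8, 6)), i.e.
 * (2 - 1) + (6 - 5) = 2 huge pages.
 */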

/*
 * Convert the address within this vma to the page offset within
 * the mapping, in pagecache page units; huge pages here.
 */
static pgoff_t vma_hugecache_offset(struct hstate *h,
                        struct vm_area_struct *vma, unsigned long address)
{
        return ((address - vma->vm_start) >> huge_page_shift(h)) +
                        (vma->vm_pgoff >> huge_page_order(h));
}
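
/*
 * Worked example (illustrative, assuming 2MB huge pages over 4KB base
 * pages, i.e. huge_page_shift() == 21 and huge_page_order() == 9): an
 * address 4MB past vm_start contributes 4MB >> 21 = 2 huge pages, and
 * vm_pgoff, which is kept in base-page units, is shifted right by the
 * order (9) to convert it to huge-page units before the two terms are
 * added.
 */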

pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address)
{
        return vma_hugecache_offset(hstate_vma(vma), vma, address);
}
EXPORT_SYMBOL_GPL(linear_hugepage_index);

/*
 * Return the size of the pages allocated when backing a VMA.  In the majority
 * of cases this will be the same size as used by the page table entries.
 */
unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
{
        struct hstate *hstate;

        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        hstate = hstate_vma(vma);

        return 1UL << huge_page_shift(hstate);
}
EXPORT_SYMBOL_GPL(vma_kernel_pagesize);

/*
 * Return the page size being used by the MMU to back a VMA.  In the majority
 * of cases, the page size used by the kernel matches the MMU size.  On
 * architectures where it differs, an architecture-specific version of this
 * function is required.
 */
#ifndef vma_mmu_pagesize
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        return vma_kernel_pagesize(vma);
}
#endif

/*
 * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
 * bits of the reservation map pointer, which are always clear due to
 * alignment.
 */
#define HPAGE_RESV_OWNER    (1UL << 0)
#define HPAGE_RESV_UNMAPPED (1UL << 1)
#define HPAGE_RESV_MASK     (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
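
/*
 * Illustrative sketch of the encoding (not code from this file): a
 * resv_map is obtained from kmalloc(), so its address is at least
 * word-aligned and its low two bits are zero.  The pointer and flags
 * are packed and unpacked as:
 *
 *	priv  = (unsigned long)map | HPAGE_RESV_OWNER;
 *	map   = (struct resv_map *)(priv & ~HPAGE_RESV_MASK);
 *	owner = priv & HPAGE_RESV_OWNER;
 *
 * which is what the get/set helpers below implement.
 */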

/*
 * These helpers are used to track how many pages are reserved for
 * faults in a MAP_PRIVATE mapping.  Only the process that called mmap()
 * is guaranteed to have their future faults succeed.
 *
 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
 * the reserve counters are updated with the hugetlb_lock held.  It is safe
 * to reset the VMA at fork() time as it is not in use yet and there is no
 * chance of the global counters getting corrupted as a result of the values.
 *
 * The private mapping reservation is represented in a subtly different
 * manner to a shared mapping.  A shared mapping has a region map associated
 * with the underlying file; this region map represents the backing file
 * pages which have ever had a reservation assigned, and persists even
 * after the page is instantiated.  A private mapping has a region map
 * associated with the original mmap which is attached to all VMAs which
 * reference it; this region map represents those offsets which have consumed
 * reservation, i.e. where pages have been instantiated.
 */
static unsigned long get_vma_private_data(struct vm_area_struct *vma)
{
        return (unsigned long)vma->vm_private_data;
}

static void set_vma_private_data(struct vm_area_struct *vma,
                                                        unsigned long value)
{
        vma->vm_private_data = (void *)value;
}

struct resv_map *resv_map_alloc(void)
{
        struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
        struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);

        if (!resv_map || !rg) {
                kfree(resv_map);
                kfree(rg);
                return NULL;
        }

        kref_init(&resv_map->refs);
        spin_lock_init(&resv_map->lock);
        INIT_LIST_HEAD(&resv_map->regions);

        resv_map->adds_in_progress = 0;

        INIT_LIST_HEAD(&resv_map->region_cache);
        list_add(&rg->link, &resv_map->region_cache);
        resv_map->region_cache_count = 1;

        return resv_map;
}

void resv_map_release(struct kref *ref)
{
        struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
        struct list_head *head = &resv_map->region_cache;
        struct file_region *rg, *trg;

        /* Clear out any active regions before we release the map. */
        region_del(resv_map, 0, LONG_MAX);

        /* ... and any entries left in the cache */
        list_for_each_entry_safe(rg, trg, head, link) {
                list_del(&rg->link);
                kfree(rg);
        }

        VM_BUG_ON(resv_map->adds_in_progress);

        kfree(resv_map);
}

static inline struct resv_map *inode_resv_map(struct inode *inode)
{
        return inode->i_mapping->private_data;
}

static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (vma->vm_flags & VM_MAYSHARE) {
                struct address_space *mapping = vma->vm_file->f_mapping;
                struct inode *inode = mapping->host;

                return inode_resv_map(inode);

        } else {
                return (struct resv_map *)(get_vma_private_data(vma) &
                                                        ~HPAGE_RESV_MASK);
        }
}

static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, (get_vma_private_data(vma) &
                                HPAGE_RESV_MASK) | (unsigned long)map);
}

static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);

        set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}

static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

        return (get_vma_private_data(vma) & flag) != 0;
}

/* Reset counters to 0 and clear all HPAGE_RESV_* flags */
void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
        VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
        if (!(vma->vm_flags & VM_MAYSHARE))
                vma->vm_private_data = (void *)0;
}

/* Returns true if the VMA has associated reserve pages */
static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
{
        if (vma->vm_flags & VM_NORESERVE) {
                /*
                 * This address is already reserved by another process
                 * (chg == 0), so we should decrement the reserved count.
                 * Without decrementing, the reserve count remains after
                 * releasing the inode, because this allocated page will go
                 * into the page cache and is regarded as coming from the
                 * reserved pool in the releasing step.  Currently, we don't
                 * have any other solution to deal with this situation
                 * properly, so add a work-around here.
                 */
                if (vma->vm_flags & VM_MAYSHARE && chg == 0)
                        return true;
                else
                        return false;
        }

        /* Shared mappings always use reserves */
        if (vma->vm_flags & VM_MAYSHARE) {
                /*
                 * We know VM_NORESERVE is not set.  Therefore, there SHOULD
                 * be a region map for all pages.  The only situation where
                 * there is no region map is if a hole was punched via
                 * fallocate.  In this case, there really are no reserves to
                 * use.  This situation is indicated if chg != 0.
                 */
                if (chg)
                        return false;
                else
                        return true;
        }

        /*
         * Only the process that called mmap() has reserves for
         * private mappings.
         */
        if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
                /*
                 * Like the shared case above, a hole punch or truncate
                 * could have been performed on the private mapping.
                 * Examine the value of chg to determine if reserves
                 * actually exist or were previously consumed.
                 * Very Subtle - The value of chg comes from a previous
                 * call to vma_needs_reserves().  The reserve map for
                 * private mappings has different (opposite) semantics
                 * than that of shared mappings.  vma_needs_reserves()
                 * has already taken this difference in semantics into
                 * account.  Therefore, the meaning of chg is the same
                 * as in the shared case above.  Code could easily be
                 * combined, but keeping it separate draws attention to
                 * subtle differences.
                 */
                if (chg)
                        return false;
                else
                        return true;
        }

        return false;
}

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
        int nid = page_to_nid(page);
        list_move(&page->lru, &h->hugepage_freelists[nid]);
        h->free_huge_pages++;
        h->free_huge_pages_node[nid]++;
}

static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
{
        struct page *page;

        list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
                if (!PageHWPoison(page))
                        break;

        /*
         * If no non-poisoned free hugepage was found on the list,
         * the allocation fails.
         */
        if (&h->hugepage_freelists[nid] == &page->lru)
                return NULL;
        list_move(&page->lru, &h->hugepage_activelist);
        set_page_refcounted(page);
        h->free_huge_pages--;
        h->free_huge_pages_node[nid]--;
        return page;
}

static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
                nodemask_t *nmask)
{
        unsigned int cpuset_mems_cookie;
        struct zonelist *zonelist;
        struct zone *zone;
        struct zoneref *z;
        int node = -1;

        zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
        cpuset_mems_cookie = read_mems_allowed_begin();
        for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
                struct page *page;

                if (!cpuset_zone_allowed(zone, gfp_mask))
                        continue;
                /*
                 * no need to ask again on the same node. Pool is node rather than
                 * zone aware
                 */
                if (zone_to_nid(zone) == node)
                        continue;
                node = zone_to_nid(zone);

                page = dequeue_huge_page_node_exact(h, node);
                if (page)
                        return page;
        }
        if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
                goto retry_cpuset;

        return NULL;
}

/* Movability of hugepages depends on migration support. */
static inline gfp_t htlb_alloc_mask(struct hstate *h)
{
        if (hugepage_migration_supported(h))
                return GFP_HIGHUSER_MOVABLE;
        else
                return GFP_HIGHUSER;
}

static struct page *dequeue_huge_page_vma(struct hstate *h,
                                struct vm_area_struct *vma,
                                unsigned long address, int avoid_reserve,
                                long chg)
{
        struct page *page;
        struct mempolicy *mpol;
        gfp_t gfp_mask;
        nodemask_t *nodemask;
        int nid;

        /*
         * A child process with MAP_PRIVATE mappings created by its parent
         * has no page reserves.  This check ensures that reservations are
         * not "stolen".  The child may still get SIGKILLed.
         */
        if (!vma_has_reserves(vma, chg) &&
                        h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;

        /* If reserves cannot be used, ensure enough pages are in the pool */
        if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
                goto err;

        gfp_mask = htlb_alloc_mask(h);
        nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
        page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
        if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
                SetPagePrivate(page);
                h->resv_huge_pages--;
        }

        mpol_cond_put(mpol);
        return page;

err:
        return NULL;
}

/*
 * common helper functions for hstate_next_node_to_{alloc|free}.
 * We may have allocated or freed a huge page based on a different
 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
 * be outside of *nodes_allowed.  Ensure that we use an allowed
 * node for alloc or free.
 */
static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
{
        nid = next_node_in(nid, *nodes_allowed);
        VM_BUG_ON(nid >= MAX_NUMNODES);

        return nid;
}

static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
{
        if (!node_isset(nid, *nodes_allowed))
                nid = next_node_allowed(nid, nodes_allowed);
        return nid;
}

/*
 * returns the previously saved node ["this node"] from which to
 * allocate a persistent huge page for the pool and advance the
 * next node from which to allocate, handling wrap at end of node
 * mask.
 */
static int hstate_next_node_to_alloc(struct hstate *h,
                                        nodemask_t *nodes_allowed)
{
        int nid;

        VM_BUG_ON(!nodes_allowed);

        nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
        h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);

        return nid;
}

/*
 * helper for free_pool_huge_page() - return the previously saved
 * node ["this node"] from which to free a huge page.  Advance the
 * next node id whether or not we find a free huge page to free so
 * that the next attempt to free addresses the next node.
 */
static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
{
        int nid;

        VM_BUG_ON(!nodes_allowed);

        nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
        h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);

        return nid;
}

#define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)          \
        for (nr_nodes = nodes_weight(*mask);                            \
                nr_nodes > 0 &&                                         \
                ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
                nr_nodes--)

#define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
        for (nr_nodes = nodes_weight(*mask);                            \
                nr_nodes > 0 &&                                         \
                ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
                nr_nodes--)
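
/*
 * Worked example (illustrative only): with *mask = {0, 2} and
 * h->next_nid_to_alloc == 2, for_each_node_mask_to_alloc() runs
 * nodes_weight() == 2 iterations, visiting node 2 and then node 0
 * (next_node_in() wraps around the mask), and leaves
 * next_nid_to_alloc at 2 again so the next caller resumes the
 * round-robin where this one stopped.
 */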

#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
static void destroy_compound_gigantic_page(struct page *page,
                                        unsigned int order)
{
        int i;
        int nr_pages = 1 << order;
        struct page *p = page + 1;

        atomic_set(compound_mapcount_ptr(page), 0);
        for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
                clear_compound_head(p);
                set_page_refcounted(p);
        }

        set_compound_order(page, 0);
        __ClearPageHead(page);
}

static void free_gigantic_page(struct page *page, unsigned int order)
{
        free_contig_range(page_to_pfn(page), 1 << order);
}

static int __alloc_gigantic_page(unsigned long start_pfn,
                                unsigned long nr_pages, gfp_t gfp_mask)
{
        unsigned long end_pfn = start_pfn + nr_pages;
        return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
                                  gfp_mask);
}

static bool pfn_range_valid_gigantic(struct zone *z,
                        unsigned long start_pfn, unsigned long nr_pages)
{
        unsigned long i, end_pfn = start_pfn + nr_pages;
        struct page *page;

        for (i = start_pfn; i < end_pfn; i++) {
                if (!pfn_valid(i))
                        return false;

                page = pfn_to_page(i);

                if (page_zone(page) != z)
                        return false;

                if (PageReserved(page))
                        return false;

                if (page_count(page) > 0)
                        return false;

                if (PageHuge(page))
                        return false;
        }

        return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
                        unsigned long start_pfn, unsigned long nr_pages)
{
        unsigned long last_pfn = start_pfn + nr_pages - 1;
        return zone_spans_pfn(zone, last_pfn);
}

static struct page *alloc_gigantic_page(int nid, struct hstate *h)
{
        unsigned int order = huge_page_order(h);
        unsigned long nr_pages = 1 << order;
        unsigned long ret, pfn, flags;
        struct zonelist *zonelist;
        struct zone *zone;
        struct zoneref *z;
        gfp_t gfp_mask;

        gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
        zonelist = node_zonelist(nid, gfp_mask);
        for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), NULL) {
                spin_lock_irqsave(&zone->lock, flags);

                pfn = ALIGN(zone->zone_start_pfn, nr_pages);
                while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
                        if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
                                /*
                                 * We release the zone lock here because
                                 * alloc_contig_range() will also lock the zone
                                 * at some point. If there's an allocation
                                 * spinning on this lock, it may win the race
                                 * and cause alloc_contig_range() to fail...
                                 */
                                spin_unlock_irqrestore(&zone->lock, flags);
                                ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
                                if (!ret)
                                        return pfn_to_page(pfn);
                                spin_lock_irqsave(&zone->lock, flags);
                        }
                        pfn += nr_pages;
                }

                spin_unlock_irqrestore(&zone->lock, flags);
        }

        return NULL;
}
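
/*
 * Worked example (illustrative, assuming a 1GB hstate on x86-64 with
 * 4KB base pages): order is 18 and nr_pages is 262144, so the scan
 * above starts at the zone's first 1GB-aligned pfn and advances in
 * 1GB steps, calling alloc_contig_range() (via __alloc_gigantic_page())
 * only on ranges that pfn_range_valid_gigantic() found entirely valid,
 * free and unreserved.
 */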

static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
static void prep_compound_gigantic_page(struct page *page, unsigned int order);

static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
{
        struct page *page;

        page = alloc_gigantic_page(nid, h);
        if (page) {
                prep_compound_gigantic_page(page, huge_page_order(h));
                prep_new_huge_page(h, page, nid);
        }

        return page;
}

static int alloc_fresh_gigantic_page(struct hstate *h,
                                nodemask_t *nodes_allowed)
{
        struct page *page = NULL;
        int nr_nodes, node;

        for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
                page = alloc_fresh_gigantic_page_node(h, node);
                if (page)
                        return 1;
        }

        return 0;
}

#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
static inline bool gigantic_page_supported(void) { return false; }
static inline void free_gigantic_page(struct page *page, unsigned int order) { }
static inline void destroy_compound_gigantic_page(struct page *page,
                                                unsigned int order) { }
static inline int alloc_fresh_gigantic_page(struct hstate *h,
                                        nodemask_t *nodes_allowed) { return 0; }
#endif
  1015. static void update_and_free_page(struct hstate *h, struct page *page)
  1016. {
  1017. int i;
  1018. if (hstate_is_gigantic(h) && !gigantic_page_supported())
  1019. return;
  1020. h->nr_huge_pages--;
  1021. h->nr_huge_pages_node[page_to_nid(page)]--;
  1022. for (i = 0; i < pages_per_huge_page(h); i++) {
  1023. page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
  1024. 1 << PG_referenced | 1 << PG_dirty |
  1025. 1 << PG_active | 1 << PG_private |
  1026. 1 << PG_writeback);
  1027. }
  1028. VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
  1029. set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
  1030. set_page_refcounted(page);
  1031. if (hstate_is_gigantic(h)) {
  1032. destroy_compound_gigantic_page(page, huge_page_order(h));
  1033. free_gigantic_page(page, huge_page_order(h));
  1034. } else {
  1035. __free_pages(page, huge_page_order(h));
  1036. }
  1037. }
struct hstate *size_to_hstate(unsigned long size)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (huge_page_size(h) == size)
			return h;
	}
	return NULL;
}

/*
 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
 * to hstate->hugepage_activelist.)
 *
 * This function can be called for tail pages, but never returns true for them.
 */
bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return PageHead(page) && PagePrivate(&page[1]);
}

/* never called for tail page */
static void set_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	ClearPagePrivate(&page[1]);
}
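
/*
 * Illustrative note (not part of the original source): the "active" bit
 * lives in the PG_private flag of the *first tail page* (&page[1]) rather
 * than the head page, because the head page's PG_private already carries
 * the restore-reserve flag consumed by free_huge_page(). A hedged sketch
 * of the layout, using only fields seen in this file:
 *
 *	page[0]: head; PagePrivate == "restore reservation on free"
 *	page[1]: first tail; PagePrivate == "on hugepage_activelist"
 */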
void free_huge_page(struct page *page)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * compound page destructor.
	 */
	struct hstate *h = page_hstate(page);
	int nid = page_to_nid(page);
	struct hugepage_subpool *spool =
		(struct hugepage_subpool *)page_private(page);
	bool restore_reserve;

	set_page_private(page, 0);
	page->mapping = NULL;
	VM_BUG_ON_PAGE(page_count(page), page);
	VM_BUG_ON_PAGE(page_mapcount(page), page);
	restore_reserve = PagePrivate(page);
	ClearPagePrivate(page);

	/*
	 * A return code of zero implies that the subpool will be under its
	 * minimum size if the reservation is not restored after the page
	 * is freed. Therefore, force the restore_reserve operation.
	 */
	if (hugepage_subpool_put_pages(spool, 1) == 0)
		restore_reserve = true;

	spin_lock(&hugetlb_lock);
	clear_page_huge_active(page);
	hugetlb_cgroup_uncharge_page(hstate_index(h),
				     pages_per_huge_page(h), page);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		list_del(&page->lru);
		update_and_free_page(h, page);
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	} else {
		arch_clear_hugepage_flags(page);
		enqueue_huge_page(h, page);
	}
	spin_unlock(&hugetlb_lock);
}
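
/*
 * Illustrative note (not part of the original source): free_huge_page()
 * is the compound-page destructor, so it runs from put_page() when the
 * last reference drops. A hedged sketch of the disposition decided above:
 *
 *	surplus_huge_pages_node[nid] > 0  -> update_and_free_page()
 *	                                     (page returns to the buddy)
 *	otherwise                         -> enqueue_huge_page()
 *	                                     (page returns to the free list)
 */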
static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
{
	INIT_LIST_HEAD(&page->lru);
	set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
	spin_lock(&hugetlb_lock);
	set_hugetlb_cgroup(page, NULL);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;
	spin_unlock(&hugetlb_lock);
	put_page(page); /* free it into the hugepage allocator */
}

static void prep_compound_gigantic_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;
	struct page *p = page + 1;

	/* we rely on prep_new_huge_page to set the destructor */
	set_compound_order(page, order);
	__ClearPageReserved(page);
	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		/*
		 * For gigantic hugepages allocated through bootmem at
		 * boot, it's safer to be consistent with the not-gigantic
		 * hugepages and clear the PG_reserved bit from all tail pages
		 * too. Otherwise drivers using get_user_pages() to access tail
		 * pages may get the reference counting wrong if they see
		 * PG_reserved set on a tail page (despite the head page not
		 * having PG_reserved set). Enforcing this consistency between
		 * head and tail pages allows drivers to optimize away a check
		 * on the head page when they need to know if put_page() is
		 * needed after get_user_pages().
		 */
		__ClearPageReserved(p);
		set_page_count(p, 0);
		set_compound_head(p, page);
	}
	atomic_set(compound_mapcount_ptr(page), -1);
}
/*
 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
 * transparent huge pages. See the PageTransHuge() documentation for more
 * details.
 */
int PageHuge(struct page *page)
{
	if (!PageCompound(page))
		return 0;

	page = compound_head(page);
	return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
}
EXPORT_SYMBOL_GPL(PageHuge);

/*
 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
 * normal or transparent huge pages.
 */
int PageHeadHuge(struct page *page_head)
{
	if (!PageHead(page_head))
		return 0;

	return get_compound_page_dtor(page_head) == free_huge_page;
}

pgoff_t __basepage_index(struct page *page)
{
	struct page *page_head = compound_head(page);
	pgoff_t index = page_index(page_head);
	unsigned long compound_idx;

	if (!PageHuge(page_head))
		return page_index(page);

	if (compound_order(page_head) >= MAX_ORDER)
		compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
	else
		compound_idx = page - page_head;

	return (index << compound_order(page_head)) + compound_idx;
}
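
/*
 * Illustrative example (not part of the original source): for a 2 MB
 * hugepage (order 9) whose head sits at hugetlbfs index 3, the tail page
 * at offset 5 within the compound page maps to base-page index
 *
 *	(3 << 9) + 5 = 1541
 *
 * For gigantic pages (order >= MAX_ORDER) the tail offset is computed via
 * PFNs instead of struct page pointer arithmetic, because the mem_map is
 * not guaranteed to be virtually contiguous across such a large range.
 */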
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
	struct page *page;

	page = __alloc_pages_node(nid,
		htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
						__GFP_RETRY_MAYFAIL|__GFP_NOWARN,
		huge_page_order(h));
	if (page)
		prep_new_huge_page(h, page, nid);

	return page;
}

static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
{
	struct page *page;
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
		page = alloc_fresh_huge_page_node(h, node);
		if (page) {
			ret = 1;
			break;
		}
	}

	if (ret)
		count_vm_event(HTLB_BUDDY_PGALLOC);
	else
		count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);

	return ret;
}
/*
 * Free huge page from pool from next node to free.
 * Attempt to keep persistent huge pages more or less
 * balanced over allowed nodes.
 * Called with hugetlb_lock locked.
 */
static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
							 bool acct_surplus)
{
	int nr_nodes, node;
	int ret = 0;

	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * If we're returning unused surplus pages, only examine
		 * nodes with surplus pages.
		 */
		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
		    !list_empty(&h->hugepage_freelists[node])) {
			struct page *page =
				list_entry(h->hugepage_freelists[node].next,
					  struct page, lru);
			list_del(&page->lru);
			h->free_huge_pages--;
			h->free_huge_pages_node[node]--;
			if (acct_surplus) {
				h->surplus_huge_pages--;
				h->surplus_huge_pages_node[node]--;
			}
			update_and_free_page(h, page);
			ret = 1;
			break;
		}
	}

	return ret;
}
/*
 * Dissolve a given free hugepage into free buddy pages. This function does
 * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
 * number of free hugepages would be reduced below the number of reserved
 * hugepages.
 */
int dissolve_free_huge_page(struct page *page)
{
	int rc = 0;

	spin_lock(&hugetlb_lock);
	if (PageHuge(page) && !page_count(page)) {
		struct page *head = compound_head(page);
		struct hstate *h = page_hstate(head);
		int nid = page_to_nid(head);
		if (h->free_huge_pages - h->resv_huge_pages == 0) {
			rc = -EBUSY;
			goto out;
		}
		/*
		 * Move the PageHWPoison flag from the head page to the raw
		 * error page, so that all subpages other than the error page
		 * become reusable.
		 */
		if (PageHWPoison(head) && page != head) {
			SetPageHWPoison(page);
			ClearPageHWPoison(head);
		}
		list_del(&head->lru);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		h->max_huge_pages--;
		update_and_free_page(h, head);
	}
out:
	spin_unlock(&hugetlb_lock);
	return rc;
}

/*
 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
 * make specified memory blocks removable from the system.
 * Note that this will dissolve a free gigantic hugepage completely, if any
 * part of it lies within the given range.
 * Also note that if dissolve_free_huge_page() returns with an error, all
 * free hugepages that were dissolved before that error are lost.
 */
int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int rc = 0;

	if (!hugepages_supported())
		return rc;

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
		page = pfn_to_page(pfn);
		if (PageHuge(page) && !page_count(page)) {
			rc = dissolve_free_huge_page(page);
			if (rc)
				break;
		}
	}

	return rc;
}
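
/*
 * Illustrative note (not part of the original source): the loop above
 * steps by 1 << minimum_order, the order of the smallest configured
 * hstate, so it lands on every PFN that could possibly be a hugepage
 * head. E.g. with 2 MB (order 9) as the smallest hstate and a
 * hypothetical 4 KB base page size, a 128 MB hot-remove range is probed
 * at 64 candidate head PFNs.
 */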
static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	int order = huge_page_order(h);

	gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();
	return __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
}

static struct page *__alloc_buddy_huge_page(struct hstate *h, gfp_t gfp_mask,
		int nid, nodemask_t *nmask)
{
	struct page *page;
	unsigned int r_nid;

	if (hstate_is_gigantic(h))
		return NULL;

	/*
	 * Assume we will successfully allocate the surplus page to
	 * prevent racing processes from causing the surplus to exceed
	 * overcommit
	 *
	 * This however introduces a different race, where a process B
	 * tries to grow the static hugepage pool while alloc_pages() is
	 * called by process A. B will only examine the per-node
	 * counters in determining if surplus huge pages can be
	 * converted to normal huge pages in adjust_pool_surplus(). A
	 * won't be able to increment the per-node counter, until the
	 * lock is dropped by B, but B doesn't drop hugetlb_lock until
	 * no more huge pages can be converted from surplus to normal
	 * state (and doesn't try to convert again). Thus, we have a
	 * case where a surplus huge page exists, the pool is grown, and
	 * the surplus huge page still exists after, even though it
	 * should just have been converted to a normal huge page. This
	 * does not leak memory, though, as the hugepage will be freed
	 * once it is out of use. It also does not allow the counters to
	 * go out of whack in adjust_pool_surplus() as we don't modify
	 * the node values until we've gotten the hugepage and only the
	 * per-node value is checked there.
	 */
	spin_lock(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		spin_unlock(&hugetlb_lock);
		return NULL;
	} else {
		h->nr_huge_pages++;
		h->surplus_huge_pages++;
	}
	spin_unlock(&hugetlb_lock);

	page = __hugetlb_alloc_buddy_huge_page(h, gfp_mask, nid, nmask);

	spin_lock(&hugetlb_lock);
	if (page) {
		INIT_LIST_HEAD(&page->lru);
		r_nid = page_to_nid(page);
		set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
		set_hugetlb_cgroup(page, NULL);
		/*
		 * We incremented the global counters already
		 */
		h->nr_huge_pages_node[r_nid]++;
		h->surplus_huge_pages_node[r_nid]++;
		__count_vm_event(HTLB_BUDDY_PGALLOC);
	} else {
		h->nr_huge_pages--;
		h->surplus_huge_pages--;
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
	}
	spin_unlock(&hugetlb_lock);

	return page;
}
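
/*
 * Illustrative note (not part of the original source): the counters are
 * bumped optimistically *before* the buddy allocation so that concurrent
 * overcommit attempts see this page as already accounted. A hedged sketch
 * with hypothetical values: with nr_overcommit_huge_pages = 4 and
 * surplus_huge_pages = 4, the check above fails immediately, so at most
 * four surplus pages can ever be live or in flight at once.
 */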
/*
 * Use the VMA's mpolicy to allocate a huge page from the buddy.
 */
static
struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	struct mempolicy *mpol;
	gfp_t gfp_mask = htlb_alloc_mask(h);
	int nid;
	nodemask_t *nodemask;

	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
	page = __alloc_buddy_huge_page(h, gfp_mask, nid, nodemask);
	mpol_cond_put(mpol);

	return page;
}

/*
 * This allocation function is useful in the context where vma is irrelevant.
 * E.g. soft-offlining uses this function because it only cares about the
 * physical address of the error page.
 */
struct page *alloc_huge_page_node(struct hstate *h, int nid)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);
	struct page *page = NULL;

	if (nid != NUMA_NO_NODE)
		gfp_mask |= __GFP_THISNODE;

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0)
		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
	spin_unlock(&hugetlb_lock);

	if (!page)
		page = __alloc_buddy_huge_page(h, gfp_mask, nid, NULL);

	return page;
}

struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask)
{
	gfp_t gfp_mask = htlb_alloc_mask(h);

	spin_lock(&hugetlb_lock);
	if (h->free_huge_pages - h->resv_huge_pages > 0) {
		struct page *page;

		page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
		if (page) {
			spin_unlock(&hugetlb_lock);
			return page;
		}
	}
	spin_unlock(&hugetlb_lock);

	/* No reservations, try to overcommit */
	return __alloc_buddy_huge_page(h, gfp_mask, preferred_nid, nmask);
}
/*
 * Increase the hugetlb pool such that it can accommodate a reservation
 * of size 'delta'.
 */
static int gather_surplus_pages(struct hstate *h, int delta)
{
	struct list_head surplus_list;
	struct page *page, *tmp;
	int ret, i;
	int needed, allocated;
	bool alloc_ok = true;

	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;
	INIT_LIST_HEAD(&surplus_list);

	ret = -ENOMEM;
retry:
	spin_unlock(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		page = __alloc_buddy_huge_page(h, htlb_alloc_mask(h),
				NUMA_NO_NODE, NULL);
		if (!page) {
			alloc_ok = false;
			break;
		}
		list_add(&page->lru, &surplus_list);
		cond_resched();
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation. Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator. Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/*
		 * This page is now managed by the hugetlb allocator and has
		 * no users -- drop the buddy allocator's reference.
		 */
		put_page_testzero(page);
		VM_BUG_ON_PAGE(page_count(page), page);
		enqueue_huge_page(h, page);
	}
free:
	spin_unlock(&hugetlb_lock);

	/* Free unnecessary surplus pages to the buddy allocator */
	list_for_each_entry_safe(page, tmp, &surplus_list, lru)
		put_page(page);
	spin_lock(&hugetlb_lock);

	return ret;
}
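
/*
 * Illustrative example (not part of the original source), with
 * hypothetical counts: resv_huge_pages = 10, delta = 5 and
 * free_huge_pages = 12 give needed = (10 + 5) - 12 = 3, so three surplus
 * pages are pulled from the buddy allocator. If another task freed two
 * huge pages while the lock was dropped (free_huge_pages = 14), the
 * recalculated needed is (10 + 5) - (14 + 3) = -2, and the two extra
 * allocations are simply handed back via put_page() at the free: label.
 */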
/*
 * This routine has two main purposes:
 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
 *    in unused_resv_pages. This corresponds to the prior adjustments made
 *    to the associated reservation map.
 * 2) Free any unused surplus pages that may have been allocated to satisfy
 *    the reservation. As many as unused_resv_pages may be freed.
 *
 * Called with hugetlb_lock held. However, the lock could be dropped (and
 * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
 * we must make sure nobody else can claim pages we are in the process of
 * freeing. Do this by ensuring resv_huge_pages is always greater than the
 * number of huge pages we plan to free when dropping the lock.
 */
static void return_unused_surplus_pages(struct hstate *h,
					unsigned long unused_resv_pages)
{
	unsigned long nr_pages;

	/* Cannot return gigantic pages currently */
	if (hstate_is_gigantic(h))
		goto out;

	/*
	 * Part (or even all) of the reservation could have been backed
	 * by pre-allocated pages. Only free surplus pages.
	 */
	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);

	/*
	 * We want to release as many surplus pages as possible, spread
	 * evenly across all nodes with memory. Iterate across these nodes
	 * until we can no longer free unreserved surplus pages. This occurs
	 * when the nodes with surplus pages have no free pages.
	 * free_pool_huge_page() will balance the freed pages across the
	 * on-line nodes with memory and will handle the hstate accounting.
	 *
	 * Note that we decrement resv_huge_pages as we free the pages. If
	 * we drop the lock, resv_huge_pages will still be sufficiently large
	 * to cover subsequent pages we may free.
	 */
	while (nr_pages--) {
		h->resv_huge_pages--;
		unused_resv_pages--;
		if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
			goto out;
		cond_resched_lock(&hugetlb_lock);
	}

out:
	/* Fully uncommit the reservation */
	h->resv_huge_pages -= unused_resv_pages;
}
/*
 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
 * are used by the huge page allocation routines to manage reservations.
 *
 * vma_needs_reservation is called to determine if the huge page at addr
 * within the vma has an associated reservation. If a reservation is
 * needed, the value 1 is returned. The caller is then responsible for
 * managing the global reservation and subpool usage counts. After
 * the huge page has been allocated, vma_commit_reservation is called
 * to add the page to the reservation map. If the page allocation fails,
 * the reservation must be ended instead of committed. vma_end_reservation
 * is called in such cases.
 *
 * In the normal case, vma_commit_reservation returns the same value
 * as the preceding vma_needs_reservation call. The only time this
 * is not the case is if a reserve map was changed between calls. It
 * is the responsibility of the caller to notice the difference and
 * take appropriate action.
 *
 * vma_add_reservation is used in error paths where a reservation must
 * be restored when a newly allocated huge page must be freed. It is
 * to be called after calling vma_needs_reservation to determine if a
 * reservation exists.
 */
enum vma_resv_mode {
	VMA_NEEDS_RESV,
	VMA_COMMIT_RESV,
	VMA_END_RESV,
	VMA_ADD_RESV,
};
static long __vma_reservation_common(struct hstate *h,
				struct vm_area_struct *vma, unsigned long addr,
				enum vma_resv_mode mode)
{
	struct resv_map *resv;
	pgoff_t idx;
	long ret;

	resv = vma_resv_map(vma);
	if (!resv)
		return 1;

	idx = vma_hugecache_offset(h, vma, addr);
	switch (mode) {
	case VMA_NEEDS_RESV:
		ret = region_chg(resv, idx, idx + 1);
		break;
	case VMA_COMMIT_RESV:
		ret = region_add(resv, idx, idx + 1);
		break;
	case VMA_END_RESV:
		region_abort(resv, idx, idx + 1);
		ret = 0;
		break;
	case VMA_ADD_RESV:
		if (vma->vm_flags & VM_MAYSHARE)
			ret = region_add(resv, idx, idx + 1);
		else {
			region_abort(resv, idx, idx + 1);
			ret = region_del(resv, idx, idx + 1);
		}
		break;
	default:
		BUG();
	}

	if (vma->vm_flags & VM_MAYSHARE)
		return ret;
	else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
		/*
		 * In most cases, reserves always exist for private mappings.
		 * However, a file associated with the mapping could have been
		 * hole punched or truncated after reserves were consumed.
		 * A subsequent fault on such a range will not use reserves.
		 * Subtle - The reserve map for private mappings has the
		 * opposite meaning than that of shared mappings. If NO
		 * entry is in the reserve map, it means a reservation exists.
		 * If an entry exists in the reserve map, it means the
		 * reservation has already been consumed. As a result, the
		 * return value of this routine is the opposite of the value
		 * returned from the reserve map manipulation routines above.
		 */
		if (ret)
			return 0;
		else
			return 1;
	}
	else
		return ret < 0 ? ret : 0;
}
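
/*
 * Illustrative summary (not part of the original source) of the inverted
 * private-mapping semantics described above, for VMA_NEEDS_RESV
 * (region_chg() returns 1 when the map has no entry at idx, 0 when an
 * entry is present):
 *
 *	shared (VM_MAYSHARE):	no entry (1) -> reservation needed, return 1
 *	private owner:		no entry (1) -> reserve still unconsumed,
 *					return 0 (no global charge needed)
 *	private owner:		entry (0)    -> reserve already consumed,
 *					return 1 (charge needed)
 */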
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
}

static long vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
}

static void vma_end_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}

static long vma_add_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}

/*
 * This routine is called to restore a reservation on error paths. In the
 * specific error paths, a huge page was allocated (via alloc_huge_page)
 * and is about to be freed. If a reservation for the page existed,
 * alloc_huge_page would have consumed the reservation and set PagePrivate
 * in the newly allocated page. When the page is freed via free_huge_page,
 * the global reservation count will be incremented if PagePrivate is set.
 * However, free_huge_page can not adjust the reserve map. Adjust the
 * reserve map here to be consistent with global reserve count adjustments
 * to be made by free_huge_page.
 */
static void restore_reserve_on_error(struct hstate *h,
			struct vm_area_struct *vma, unsigned long address,
			struct page *page)
{
	if (unlikely(PagePrivate(page))) {
		long rc = vma_needs_reservation(h, vma, address);

		if (unlikely(rc < 0)) {
			/*
			 * Rare out of memory condition in reserve map
			 * manipulation. Clear PagePrivate so that
			 * global reserve count will not be incremented
			 * by free_huge_page. This will make it appear
			 * as though the reservation for this page was
			 * consumed. This may prevent the task from
			 * faulting in the page at a later time. This
			 * is better than inconsistent global huge page
			 * accounting of reserve counts.
			 */
			ClearPagePrivate(page);
		} else if (rc) {
			rc = vma_add_reservation(h, vma, address);
			if (unlikely(rc < 0))
				/*
				 * See above comment about rare out of
				 * memory condition.
				 */
				ClearPagePrivate(page);
		} else
			vma_end_reservation(h, vma, address);
	}
}
struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr, int avoid_reserve)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct page *page;
	long map_chg, map_commit;
	long gbl_chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg;

	idx = hstate_index(h);
	/*
	 * Examine the region/reserve map to determine if the process
	 * has a reservation for the page to be allocated. A return
	 * code of zero indicates a reservation exists (no change).
	 */
	map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
	if (map_chg < 0)
		return ERR_PTR(-ENOMEM);

	/*
	 * Processes that did not create the mapping will have no
	 * reserves as indicated by the region/reserve map. Check
	 * that the allocation will not exceed the subpool limit.
	 * Allocations for MAP_NORESERVE mappings also need to be
	 * checked against any subpool limit.
	 */
	if (map_chg || avoid_reserve) {
		gbl_chg = hugepage_subpool_get_pages(spool, 1);
		if (gbl_chg < 0) {
			vma_end_reservation(h, vma, addr);
			return ERR_PTR(-ENOSPC);
		}

		/*
		 * Even though there was no reservation in the region/reserve
		 * map, there could be reservations associated with the
		 * subpool that can be used. This would be indicated if the
		 * return value of hugepage_subpool_get_pages() is zero.
		 * However, if avoid_reserve is specified we still avoid even
		 * the subpool reservations.
		 */
		if (avoid_reserve)
			gbl_chg = 1;
	}

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		goto out_subpool_put;

	spin_lock(&hugetlb_lock);
	/*
	 * gbl_chg is passed to indicate whether or not a page must be taken
	 * from the global free pool (global change). gbl_chg == 0 indicates
	 * a reservation exists for the allocation.
	 */
	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
	if (!page) {
		spin_unlock(&hugetlb_lock);
		page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
		if (!page)
			goto out_uncharge_cgroup;
		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
			SetPagePrivate(page);
			h->resv_huge_pages--;
		}
		spin_lock(&hugetlb_lock);
		list_move(&page->lru, &h->hugepage_activelist);
		/* Fall through */
	}
	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
	spin_unlock(&hugetlb_lock);

	set_page_private(page, (unsigned long)spool);

	map_commit = vma_commit_reservation(h, vma, addr);
	if (unlikely(map_chg > map_commit)) {
		/*
		 * The page was added to the reservation map between
		 * vma_needs_reservation and vma_commit_reservation.
		 * This indicates a race with hugetlb_reserve_pages.
		 * Adjust for the subpool count incremented above AND
		 * in hugetlb_reserve_pages for the same page. Also,
		 * the reservation count added in hugetlb_reserve_pages
		 * no longer applies.
		 */
		long rsv_adjust;

		rsv_adjust = hugepage_subpool_put_pages(spool, 1);
		hugetlb_acct_memory(h, -rsv_adjust);
	}
	return page;

out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_subpool_put:
	if (map_chg || avoid_reserve)
		hugepage_subpool_put_pages(spool, 1);
	vma_end_reservation(h, vma, addr);
	return ERR_PTR(-ENOSPC);
}
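
/*
 * Illustrative note (not part of the original source): the charge stages
 * mirror each other. On the happy path the order is
 *
 *	vma_needs_reservation -> subpool get -> cgroup charge ->
 *	dequeue (or buddy alloc) -> cgroup commit -> vma_commit_reservation
 *
 * and the error labels unwind in the reverse order, which is why
 * out_uncharge_cgroup deliberately falls through into out_subpool_put.
 */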
/*
 * alloc_huge_page()'s wrapper which simply returns the page if allocation
 * succeeds, otherwise NULL. This function is called from new_vma_page(),
 * where no ERR_PTR value is expected to be returned.
 */
struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve)
{
	struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
	if (IS_ERR(page))
		page = NULL;
	return page;
}

int alloc_bootmem_huge_page(struct hstate *h)
	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
int __alloc_bootmem_huge_page(struct hstate *h)
{
	struct huge_bootmem_page *m;
	int nr_nodes, node;

	for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
		void *addr;

		addr = memblock_virt_alloc_try_nid_nopanic(
				huge_page_size(h), huge_page_size(h),
				0, BOOTMEM_ALLOC_ACCESSIBLE, node);
		if (addr) {
			/*
			 * Use the beginning of the huge page to store the
			 * huge_bootmem_page struct (until gather_bootmem
			 * puts them into the mem_map).
			 */
			m = addr;
			goto found;
		}
	}
	return 0;

found:
	BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
	/* Put them into a private list first because mem_map is not up yet */
	list_add(&m->list, &huge_boot_pages);
	m->hstate = h;
	return 1;
}

static void __init prep_compound_huge_page(struct page *page,
		unsigned int order)
{
	if (unlikely(order > (MAX_ORDER - 1)))
		prep_compound_gigantic_page(page, order);
	else
		prep_compound_page(page, order);
}
/* Put bootmem huge pages into the standard lists after mem_map is up */
static void __init gather_bootmem_prealloc(void)
{
	struct huge_bootmem_page *m;

	list_for_each_entry(m, &huge_boot_pages, list) {
		struct hstate *h = m->hstate;
		struct page *page;

#ifdef CONFIG_HIGHMEM
		page = pfn_to_page(m->phys >> PAGE_SHIFT);
		memblock_free_late(__pa(m),
				   sizeof(struct huge_bootmem_page));
#else
		page = virt_to_page(m);
#endif
		WARN_ON(page_count(page) != 1);
		prep_compound_huge_page(page, h->order);
		WARN_ON(PageReserved(page));
		prep_new_huge_page(h, page, page_to_nid(page));
		/*
		 * If we had gigantic hugepages allocated at boot time, we need
		 * to restore the 'stolen' pages to totalram_pages in order to
		 * fix confusing memory reports from free(1) and other
		 * side effects, like CommitLimit going negative.
		 */
		if (hstate_is_gigantic(h))
			adjust_managed_page_count(page, 1 << h->order);
	}
}
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long i;

	for (i = 0; i < h->max_huge_pages; ++i) {
		if (hstate_is_gigantic(h)) {
			if (!alloc_bootmem_huge_page(h))
				break;
		} else if (!alloc_fresh_huge_page(h,
					 &node_states[N_MEMORY]))
			break;
		cond_resched();
	}
	if (i < h->max_huge_pages) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
			h->max_huge_pages, buf, i);
		h->max_huge_pages = i;
	}
}

static void __init hugetlb_init_hstates(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		if (minimum_order > huge_page_order(h))
			minimum_order = huge_page_order(h);

		/* oversize hugepages were init'ed in early boot */
		if (!hstate_is_gigantic(h))
			hugetlb_hstate_alloc_pages(h);
	}
	VM_BUG_ON(minimum_order == UINT_MAX);
}

static void __init report_hugepages(void)
{
	struct hstate *h;

	for_each_hstate(h) {
		char buf[32];

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
			buf, h->free_huge_pages);
	}
}
#ifdef CONFIG_HIGHMEM
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;

	if (hstate_is_gigantic(h))
		return;

	for_each_node_mask(i, *nodes_allowed) {
		struct page *page, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(page, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				return;
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(h, page);
			h->free_huge_pages--;
			h->free_huge_pages_node[page_to_nid(page)]--;
		}
	}
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif

/*
 * Increment or decrement surplus_huge_pages. Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int nr_nodes, node;

	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0) {
		for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node])
				goto found;
		}
	} else {
		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node] <
					h->nr_huge_pages_node[node])
				goto found;
		}
	}
	return 0;

found:
	h->surplus_huge_pages += delta;
	h->surplus_huge_pages_node[node] += delta;
	return 1;
}
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	unsigned long min_count, ret;

	if (hstate_is_gigantic(h) && !gigantic_page_supported())
		return h->max_huge_pages;

	/*
	 * Increase the pool size
	 * First take pages out of surplus state. Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with __alloc_buddy_huge_page() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	spin_lock(&hugetlb_lock);
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	while (count > persistent_huge_pages(h)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_page will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock(&hugetlb_lock);

		/* yield cpu to avoid soft lockup */
		cond_resched();

		if (hstate_is_gigantic(h))
			ret = alloc_fresh_gigantic_page(h, nodes_allowed);
		else
			ret = alloc_fresh_huge_page(h, nodes_allowed);
		spin_lock(&hugetlb_lock);
		if (!ret)
			goto out;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current))
			goto out;
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations). Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * __alloc_buddy_huge_page() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 */
	min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);
	while (min_count < persistent_huge_pages(h)) {
		if (!free_pool_huge_page(h, nodes_allowed, 0))
			break;
		cond_resched_lock(&hugetlb_lock);
	}
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	ret = persistent_huge_pages(h);
	spin_unlock(&hugetlb_lock);
	return ret;
}
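
/*
 * Illustrative example (not part of the original source): when shrinking,
 * min_count = resv_huge_pages + (nr_huge_pages - free_huge_pages), i.e.
 * reserved plus in-use pages can never be freed outright. With the
 * hypothetical counts nr = 100, free = 40, resv = 10 and a request of
 * count = 20, min_count = 10 + 100 - 40 = 70: thirty free pages are
 * returned to the buddy immediately, and the remaining fifty in-use pages
 * become surplus, freed one by one as their users release them.
 */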
#define HSTATE_ATTR_RO(_name) \
	static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define HSTATE_ATTR(_name) \
	static struct kobj_attribute _name##_attr = \
		__ATTR(_name, 0644, _name##_show, _name##_store)

static struct kobject *hugepages_kobj;
static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);

static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
{
	int i;

	for (i = 0; i < HUGE_MAX_HSTATE; i++)
		if (hstate_kobjs[i] == kobj) {
			if (nidp)
				*nidp = NUMA_NO_NODE;
			return &hstates[i];
		}

	return kobj_to_node_hstate(kobj, nidp);
}

static ssize_t nr_hugepages_show_common(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long nr_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		nr_huge_pages = h->nr_huge_pages;
	else
		nr_huge_pages = h->nr_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", nr_huge_pages);
}

static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
					   struct hstate *h, int nid,
					   unsigned long count, size_t len)
{
	int err;
	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);

	if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
		err = -EINVAL;
		goto out;
	}

	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(nodes_allowed))) {
			NODEMASK_FREE(nodes_allowed);
			nodes_allowed = &node_states[N_MEMORY];
		}
	} else if (nodes_allowed) {
		/*
		 * per node hstate attribute: adjust count to global,
		 * but restrict alloc/free to the specified node.
		 */
		count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
		init_nodemask_of_node(nodes_allowed, nid);
	} else
		nodes_allowed = &node_states[N_MEMORY];

	h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);

	if (nodes_allowed != &node_states[N_MEMORY])
		NODEMASK_FREE(nodes_allowed);

	return len;
out:
	NODEMASK_FREE(nodes_allowed);
	return err;
}

static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
					 struct kobject *kobj, const char *buf,
					 size_t len)
{
	struct hstate *h;
	unsigned long count;
	int nid;
	int err;

	err = kstrtoul(buf, 10, &count);
	if (err)
		return err;

	h = kobj_to_hstate(kobj, &nid);
	return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
}

static ssize_t nr_hugepages_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(false, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages);
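
/*
 * Illustrative usage (not part of the original source): these attributes
 * surface under sysfs, so on a typical x86-64 box
 *
 *	echo 128 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
 *
 * ends up in nr_hugepages_store() -> __nr_hugepages_store_common() with
 * nid == NUMA_NO_NODE, while the per-node variant under
 * /sys/devices/system/node/nodeN/hugepages/ passes the node id instead.
 */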
#ifdef CONFIG_NUMA

/*
 * hstate attribute for optionally mempolicy-based constraint on persistent
 * huge page alloc/free.
 */
static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return nr_hugepages_show_common(kobj, attr, buf);
}

static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
	       struct kobj_attribute *attr, const char *buf, size_t len)
{
	return nr_hugepages_store_common(true, kobj, buf, len);
}
HSTATE_ATTR(nr_hugepages_mempolicy);
#endif


static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
}

static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	int err;
	unsigned long input;
	struct hstate *h = kobj_to_hstate(kobj, NULL);

	if (hstate_is_gigantic(h))
		return -EINVAL;

	err = kstrtoul(buf, 10, &input);
	if (err)
		return err;

	spin_lock(&hugetlb_lock);
	h->nr_overcommit_huge_pages = input;
	spin_unlock(&hugetlb_lock);

	return count;
}
HSTATE_ATTR(nr_overcommit_hugepages);

static ssize_t free_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long free_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		free_huge_pages = h->free_huge_pages;
	else
		free_huge_pages = h->free_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", free_huge_pages);
}
HSTATE_ATTR_RO(free_hugepages);

static ssize_t resv_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h = kobj_to_hstate(kobj, NULL);
	return sprintf(buf, "%lu\n", h->resv_huge_pages);
}
HSTATE_ATTR_RO(resv_hugepages);

static ssize_t surplus_hugepages_show(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	struct hstate *h;
	unsigned long surplus_huge_pages;
	int nid;

	h = kobj_to_hstate(kobj, &nid);
	if (nid == NUMA_NO_NODE)
		surplus_huge_pages = h->surplus_huge_pages;
	else
		surplus_huge_pages = h->surplus_huge_pages_node[nid];

	return sprintf(buf, "%lu\n", surplus_huge_pages);
}
HSTATE_ATTR_RO(surplus_hugepages);

static struct attribute *hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&nr_overcommit_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&resv_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
#ifdef CONFIG_NUMA
	&nr_hugepages_mempolicy_attr.attr,
#endif
	NULL,
};

static const struct attribute_group hstate_attr_group = {
	.attrs = hstate_attrs,
};
static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
				    struct kobject **hstate_kobjs,
				    const struct attribute_group *hstate_attr_group)
{
	int retval;
	int hi = hstate_index(h);

	hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
	if (!hstate_kobjs[hi])
		return -ENOMEM;

	retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
	if (retval)
		kobject_put(hstate_kobjs[hi]);

	return retval;
}

static void __init hugetlb_sysfs_init(void)
{
	struct hstate *h;
	int err;

	hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
	if (!hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
					 hstate_kobjs, &hstate_attr_group);
		if (err)
			pr_err("Hugetlb: Unable to add hstate %s", h->name);
	}
}

#ifdef CONFIG_NUMA

/*
 * node_hstate/s - associate per node hstate attributes, via their kobjects,
 * with node devices in node_devices[] using a parallel array. The array
 * index of a node device or node_hstate equals the node id.
 * This is here to avoid any static dependency of the node device driver, in
 * the base kernel, on the hugetlb module.
 */
struct node_hstate {
	struct kobject		*hugepages_kobj;
	struct kobject		*hstate_kobjs[HUGE_MAX_HSTATE];
};
static struct node_hstate node_hstates[MAX_NUMNODES];

/*
 * A subset of global hstate attributes for node devices
 */
static struct attribute *per_node_hstate_attrs[] = {
	&nr_hugepages_attr.attr,
	&free_hugepages_attr.attr,
	&surplus_hugepages_attr.attr,
	NULL,
};

static const struct attribute_group per_node_hstate_attr_group = {
	.attrs = per_node_hstate_attrs,
};

/*
 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
 * Returns node id via non-NULL nidp.
 */
static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	int nid;

	for (nid = 0; nid < nr_node_ids; nid++) {
		struct node_hstate *nhs = &node_hstates[nid];
		int i;
		for (i = 0; i < HUGE_MAX_HSTATE; i++)
			if (nhs->hstate_kobjs[i] == kobj) {
				if (nidp)
					*nidp = nid;
				return &hstates[i];
			}
	}

	BUG();
	return NULL;
}
/*
 * Unregister hstate attributes from a single node device.
 * No-op if no hstate attributes attached.
 */
static void hugetlb_unregister_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];

	if (!nhs->hugepages_kobj)
		return;		/* no hstate attributes */

	for_each_hstate(h) {
		int idx = hstate_index(h);
		if (nhs->hstate_kobjs[idx]) {
			kobject_put(nhs->hstate_kobjs[idx]);
			nhs->hstate_kobjs[idx] = NULL;
		}
	}

	kobject_put(nhs->hugepages_kobj);
	nhs->hugepages_kobj = NULL;
}

/*
 * Register hstate attributes for a single node device.
 * No-op if attributes already registered.
 */
static void hugetlb_register_node(struct node *node)
{
	struct hstate *h;
	struct node_hstate *nhs = &node_hstates[node->dev.id];
	int err;

	if (nhs->hugepages_kobj)
		return;		/* already allocated */

	nhs->hugepages_kobj = kobject_create_and_add("hugepages",
							&node->dev.kobj);
	if (!nhs->hugepages_kobj)
		return;

	for_each_hstate(h) {
		err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
						nhs->hstate_kobjs,
						&per_node_hstate_attr_group);
		if (err) {
			pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
				h->name, node->dev.id);
			hugetlb_unregister_node(node);
			break;
		}
	}
}

/*
 * hugetlb init time: register hstate attributes for all registered node
 * devices of nodes that have memory. All on-line nodes should have
 * registered their associated device by this time.
 */
static void __init hugetlb_register_all_nodes(void)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		struct node *node = node_devices[nid];
		if (node->dev.id == nid)
			hugetlb_register_node(node);
	}

	/*
	 * Let the node device driver know we're here so it can
	 * [un]register hstate attributes on node hotplug.
	 */
	register_hugetlbfs_with_node(hugetlb_register_node,
				     hugetlb_unregister_node);
}
#else	/* !CONFIG_NUMA */

static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
{
	BUG();
	if (nidp)
		*nidp = -1;
	return NULL;
}

static void hugetlb_register_all_nodes(void) { }

#endif
static int __init hugetlb_init(void)
{
	int i;

	if (!hugepages_supported())
		return 0;

	if (!size_to_hstate(default_hstate_size)) {
		if (default_hstate_size != 0) {
			pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
			       default_hstate_size, HPAGE_SIZE);
		}

		default_hstate_size = HPAGE_SIZE;
		if (!size_to_hstate(default_hstate_size))
			hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	}
	default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
	if (default_hstate_max_huge_pages) {
		if (!default_hstate.max_huge_pages)
			default_hstate.max_huge_pages =
				default_hstate_max_huge_pages;
	}

	hugetlb_init_hstates();
	gather_bootmem_prealloc();
	report_hugepages();

	hugetlb_sysfs_init();
	hugetlb_register_all_nodes();
	hugetlb_cgroup_file_init();

#ifdef CONFIG_SMP
	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
#else
	num_fault_mutexes = 1;
#endif
	hugetlb_fault_mutex_table =
		kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
	BUG_ON(!hugetlb_fault_mutex_table);

	for (i = 0; i < num_fault_mutexes; i++)
		mutex_init(&hugetlb_fault_mutex_table[i]);
	return 0;
}
subsys_initcall(hugetlb_init);
/* Should be called on processing a hugepagesz=... option */
void __init hugetlb_bad_size(void)
{
	parsed_valid_hugepagesz = false;
}

void __init hugetlb_add_hstate(unsigned int order)
{
	struct hstate *h;
	unsigned long i;

	if (size_to_hstate(PAGE_SIZE << order)) {
		pr_warn("hugepagesz= specified twice, ignoring\n");
		return;
	}
	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order == 0);
	h = &hstates[hugetlb_max_hstate++];
	h->order = order;
	h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
	h->nr_huge_pages = 0;
	h->free_huge_pages = 0;
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	INIT_LIST_HEAD(&h->hugepage_activelist);
	h->next_nid_to_alloc = first_memory_node;
	h->next_nid_to_free = first_memory_node;
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/1024);

	parsed_hstate = h;
}

static int __init hugetlb_nrpages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;

	if (!parsed_valid_hugepagesz) {
		pr_warn("hugepages = %s preceded by "
			"an unsupported hugepagesz, ignoring\n", s);
		parsed_valid_hugepagesz = true;
		return 1;
	}
	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 */
	else if (!hugetlb_max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
		return 1;
	}

	if (sscanf(s, "%lu", mhp) <= 0)
		*mhp = 0;

	/*
	 * Global state is always initialized later in hugetlb_init.
	 * But we need to allocate >= MAX_ORDER hstates here early to still
	 * use the bootmem allocator.
	 */
	if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
		hugetlb_hstate_alloc_pages(parsed_hstate);

	last_mhp = mhp;

	return 1;
}
__setup("hugepages=", hugetlb_nrpages_setup);
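
/*
 * Illustrative usage (not part of the original source): the pairing rule
 * enforced above means a kernel command line such as
 *
 *	hugepagesz=1G hugepages=4 hugepagesz=2M hugepages=512
 *
 * pre-allocates four 1 GB pages and 512 2 MB pages, while a bare
 * "hugepages=N" with no preceding hugepagesz= applies to the default
 * hstate. Gigantic hstates (order >= MAX_ORDER) are populated immediately
 * here so that the bootmem allocator can still satisfy them.
 */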
  2490. static int __init hugetlb_default_setup(char *s)
  2491. {
  2492. default_hstate_size = memparse(s, &s);
  2493. return 1;
  2494. }
  2495. __setup("default_hugepagesz=", hugetlb_default_setup);
  2496. static unsigned int cpuset_mems_nr(unsigned int *array)
  2497. {
  2498. int node;
  2499. unsigned int nr = 0;
  2500. for_each_node_mask(node, cpuset_current_mems_allowed)
  2501. nr += array[node];
  2502. return nr;
  2503. }
  2504. #ifdef CONFIG_SYSCTL
  2505. static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
  2506. struct ctl_table *table, int write,
  2507. void __user *buffer, size_t *length, loff_t *ppos)
  2508. {
  2509. struct hstate *h = &default_hstate;
  2510. unsigned long tmp = h->max_huge_pages;
  2511. int ret;
  2512. if (!hugepages_supported())
  2513. return -EOPNOTSUPP;
  2514. table->data = &tmp;
  2515. table->maxlen = sizeof(unsigned long);
  2516. ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
  2517. if (ret)
  2518. goto out;
  2519. if (write)
  2520. ret = __nr_hugepages_store_common(obey_mempolicy, h,
  2521. NUMA_NO_NODE, tmp, *length);
  2522. out:
  2523. return ret;
  2524. }
  2525. int hugetlb_sysctl_handler(struct ctl_table *table, int write,
  2526. void __user *buffer, size_t *length, loff_t *ppos)
  2527. {
  2528. return hugetlb_sysctl_handler_common(false, table, write,
  2529. buffer, length, ppos);
  2530. }
  2531. #ifdef CONFIG_NUMA
  2532. int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
  2533. void __user *buffer, size_t *length, loff_t *ppos)
  2534. {
  2535. return hugetlb_sysctl_handler_common(true, table, write,
  2536. buffer, length, ppos);
  2537. }
  2538. #endif /* CONFIG_NUMA */
  2539. int hugetlb_overcommit_handler(struct ctl_table *table, int write,
  2540. void __user *buffer,
  2541. size_t *length, loff_t *ppos)
  2542. {
  2543. struct hstate *h = &default_hstate;
  2544. unsigned long tmp;
  2545. int ret;
  2546. if (!hugepages_supported())
  2547. return -EOPNOTSUPP;
  2548. tmp = h->nr_overcommit_huge_pages;
  2549. if (write && hstate_is_gigantic(h))
  2550. return -EINVAL;
  2551. table->data = &tmp;
  2552. table->maxlen = sizeof(unsigned long);
  2553. ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
  2554. if (ret)
  2555. goto out;
  2556. if (write) {
  2557. spin_lock(&hugetlb_lock);
  2558. h->nr_overcommit_huge_pages = tmp;
  2559. spin_unlock(&hugetlb_lock);
  2560. }
  2561. out:
  2562. return ret;
  2563. }
  2564. #endif /* CONFIG_SYSCTL */
  2565. void hugetlb_report_meminfo(struct seq_file *m)
  2566. {
  2567. struct hstate *h;
  2568. unsigned long total = 0;
  2569. if (!hugepages_supported())
  2570. return;
  2571. for_each_hstate(h) {
  2572. unsigned long count = h->nr_huge_pages;
  2573. total += (PAGE_SIZE << huge_page_order(h)) * count;
  2574. if (h == &default_hstate)
  2575. seq_printf(m,
  2576. "HugePages_Total: %5lu\n"
  2577. "HugePages_Free: %5lu\n"
  2578. "HugePages_Rsvd: %5lu\n"
  2579. "HugePages_Surp: %5lu\n"
  2580. "Hugepagesize: %8lu kB\n",
  2581. count,
  2582. h->free_huge_pages,
  2583. h->resv_huge_pages,
  2584. h->surplus_huge_pages,
  2585. (PAGE_SIZE << huge_page_order(h)) / 1024);
  2586. }
  2587. seq_printf(m, "Hugetlb: %8lu kB\n", total / 1024);
  2588. }
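/*
 * For reference, the format strings above yield /proc/meminfo output of
 * this shape (values are illustrative only):
 *
 *   HugePages_Total:      16
 *   HugePages_Free:       12
 *   HugePages_Rsvd:        2
 *   HugePages_Surp:        0
 *   Hugepagesize:       2048 kB
 *   Hugetlb:           32768 kB
 */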
  2589. int hugetlb_report_node_meminfo(int nid, char *buf)
  2590. {
  2591. struct hstate *h = &default_hstate;
  2592. if (!hugepages_supported())
  2593. return 0;
  2594. return sprintf(buf,
  2595. "Node %d HugePages_Total: %5u\n"
  2596. "Node %d HugePages_Free: %5u\n"
  2597. "Node %d HugePages_Surp: %5u\n",
  2598. nid, h->nr_huge_pages_node[nid],
  2599. nid, h->free_huge_pages_node[nid],
  2600. nid, h->surplus_huge_pages_node[nid]);
  2601. }
  2602. void hugetlb_show_meminfo(void)
  2603. {
  2604. struct hstate *h;
  2605. int nid;
  2606. if (!hugepages_supported())
  2607. return;
  2608. for_each_node_state(nid, N_MEMORY)
  2609. for_each_hstate(h)
  2610. pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
  2611. nid,
  2612. h->nr_huge_pages_node[nid],
  2613. h->free_huge_pages_node[nid],
  2614. h->surplus_huge_pages_node[nid],
  2615. 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
  2616. }
  2617. void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
  2618. {
  2619. seq_printf(m, "HugetlbPages:\t%8lu kB\n",
  2620. atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
  2621. }
2622. /* Return the number of pages of memory backed by huge pages, in PAGE_SIZE units. */
  2623. unsigned long hugetlb_total_pages(void)
  2624. {
  2625. struct hstate *h;
  2626. unsigned long nr_total_pages = 0;
  2627. for_each_hstate(h)
  2628. nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
  2629. return nr_total_pages;
  2630. }
  2631. static int hugetlb_acct_memory(struct hstate *h, long delta)
  2632. {
  2633. int ret = -ENOMEM;
  2634. spin_lock(&hugetlb_lock);
  2635. /*
2636. * When cpuset is configured, it breaks the strict hugetlb page
2637. * reservation as the accounting is done on a global variable. Such
2638. * reservation is completely rubbish in the presence of cpuset because
2639. * the reservation is not checked against page availability for the
2640. * current cpuset. An application can still be OOM-killed by the
2641. * kernel if there are not enough free hugetlb pages in the cpuset
2642. * that the task belongs to. Enforcing strict accounting with cpuset
2643. * is almost impossible (or too ugly), because cpusets are too fluid:
2644. * tasks and memory nodes can be moved between cpusets dynamically.
2645. *
2646. * Changing the semantics of shared hugetlb mappings under cpuset is
2647. * undesirable. However, in order to preserve some of the semantics,
2648. * we fall back to checking against the current free page availability
2649. * as a best attempt, hopefully minimizing the impact of the semantic
2650. * change that cpuset brings.
  2651. */
  2652. if (delta > 0) {
  2653. if (gather_surplus_pages(h, delta) < 0)
  2654. goto out;
  2655. if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
  2656. return_unused_surplus_pages(h, delta);
  2657. goto out;
  2658. }
  2659. }
  2660. ret = 0;
  2661. if (delta < 0)
  2662. return_unused_surplus_pages(h, (unsigned long) -delta);
  2663. out:
  2664. spin_unlock(&hugetlb_lock);
  2665. return ret;
  2666. }
  2667. static void hugetlb_vm_op_open(struct vm_area_struct *vma)
  2668. {
  2669. struct resv_map *resv = vma_resv_map(vma);
  2670. /*
2671. * This new VMA should share its sibling's reservation map if present.
  2672. * The VMA will only ever have a valid reservation map pointer where
  2673. * it is being copied for another still existing VMA. As that VMA
  2674. * has a reference to the reservation map it cannot disappear until
  2675. * after this open call completes. It is therefore safe to take a
  2676. * new reference here without additional locking.
  2677. */
  2678. if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
  2679. kref_get(&resv->refs);
  2680. }
  2681. static void hugetlb_vm_op_close(struct vm_area_struct *vma)
  2682. {
  2683. struct hstate *h = hstate_vma(vma);
  2684. struct resv_map *resv = vma_resv_map(vma);
  2685. struct hugepage_subpool *spool = subpool_vma(vma);
  2686. unsigned long reserve, start, end;
  2687. long gbl_reserve;
  2688. if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
  2689. return;
  2690. start = vma_hugecache_offset(h, vma, vma->vm_start);
  2691. end = vma_hugecache_offset(h, vma, vma->vm_end);
  2692. reserve = (end - start) - region_count(resv, start, end);
  2693. kref_put(&resv->refs, resv_map_release);
  2694. if (reserve) {
  2695. /*
  2696. * Decrement reserve counts. The global reserve count may be
  2697. * adjusted if the subpool has a minimum size.
  2698. */
  2699. gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
  2700. hugetlb_acct_memory(h, -gbl_reserve);
  2701. }
  2702. }
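/*
 * Reject any attempt to split a VMA at an address that is not aligned to
 * the huge page size; a huge page cannot be split down the middle.
 */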
  2703. static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
  2704. {
  2705. if (addr & ~(huge_page_mask(hstate_vma(vma))))
  2706. return -EINVAL;
  2707. return 0;
  2708. }
  2709. /*
  2710. * We cannot handle pagefaults against hugetlb pages at all. They cause
  2711. * handle_mm_fault() to try to instantiate regular-sized pages in the
2712. * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
2713. * this far.
  2714. */
  2715. static int hugetlb_vm_op_fault(struct vm_fault *vmf)
  2716. {
  2717. BUG();
  2718. return 0;
  2719. }
  2720. const struct vm_operations_struct hugetlb_vm_ops = {
  2721. .fault = hugetlb_vm_op_fault,
  2722. .open = hugetlb_vm_op_open,
  2723. .close = hugetlb_vm_op_close,
  2724. .split = hugetlb_vm_op_split,
  2725. };
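/*
 * Build the huge PTE for @page: writable mappings get a dirty, writable
 * entry, read-only mappings a write-protected one. The entry is always
 * marked young and huge, and the architecture gets a final say via
 * arch_make_huge_pte().
 */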
  2726. static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
  2727. int writable)
  2728. {
  2729. pte_t entry;
  2730. if (writable) {
  2731. entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
  2732. vma->vm_page_prot)));
  2733. } else {
  2734. entry = huge_pte_wrprotect(mk_huge_pte(page,
  2735. vma->vm_page_prot));
  2736. }
  2737. entry = pte_mkyoung(entry);
  2738. entry = pte_mkhuge(entry);
  2739. entry = arch_make_huge_pte(entry, vma, page, writable);
  2740. return entry;
  2741. }
  2742. static void set_huge_ptep_writable(struct vm_area_struct *vma,
  2743. unsigned long address, pte_t *ptep)
  2744. {
  2745. pte_t entry;
  2746. entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
  2747. if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
  2748. update_mmu_cache(vma, address, ptep);
  2749. }
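/*
 * A huge PTE that is neither none nor present encodes a swap entry. The
 * two helpers below classify such an entry as a migration entry or a
 * hwpoison entry, respectively.
 */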
  2750. bool is_hugetlb_entry_migration(pte_t pte)
  2751. {
  2752. swp_entry_t swp;
  2753. if (huge_pte_none(pte) || pte_present(pte))
  2754. return false;
  2755. swp = pte_to_swp_entry(pte);
  2756. if (non_swap_entry(swp) && is_migration_entry(swp))
  2757. return true;
  2758. else
  2759. return false;
  2760. }
  2761. static int is_hugetlb_entry_hwpoisoned(pte_t pte)
  2762. {
  2763. swp_entry_t swp;
  2764. if (huge_pte_none(pte) || pte_present(pte))
  2765. return 0;
  2766. swp = pte_to_swp_entry(pte);
  2767. if (non_swap_entry(swp) && is_hwpoison_entry(swp))
  2768. return 1;
  2769. else
  2770. return 0;
  2771. }
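/*
 * Copy the hugetlb portion of @vma's page tables from @src to @dst,
 * typically at fork(). For private (COW-able) mappings the source PTEs
 * are write-protected too, so both parent and child will fault on their
 * next write.
 */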
  2772. int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
  2773. struct vm_area_struct *vma)
  2774. {
  2775. pte_t *src_pte, *dst_pte, entry;
  2776. struct page *ptepage;
  2777. unsigned long addr;
  2778. int cow;
  2779. struct hstate *h = hstate_vma(vma);
  2780. unsigned long sz = huge_page_size(h);
  2781. unsigned long mmun_start; /* For mmu_notifiers */
  2782. unsigned long mmun_end; /* For mmu_notifiers */
  2783. int ret = 0;
  2784. cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
  2785. mmun_start = vma->vm_start;
  2786. mmun_end = vma->vm_end;
  2787. if (cow)
  2788. mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
  2789. for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
  2790. spinlock_t *src_ptl, *dst_ptl;
  2791. src_pte = huge_pte_offset(src, addr, sz);
  2792. if (!src_pte)
  2793. continue;
  2794. dst_pte = huge_pte_alloc(dst, addr, sz);
  2795. if (!dst_pte) {
  2796. ret = -ENOMEM;
  2797. break;
  2798. }
  2799. /* If the pagetables are shared don't copy or take references */
  2800. if (dst_pte == src_pte)
  2801. continue;
  2802. dst_ptl = huge_pte_lock(h, dst, dst_pte);
  2803. src_ptl = huge_pte_lockptr(h, src, src_pte);
  2804. spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
  2805. entry = huge_ptep_get(src_pte);
  2806. if (huge_pte_none(entry)) { /* skip none entry */
  2807. ;
  2808. } else if (unlikely(is_hugetlb_entry_migration(entry) ||
  2809. is_hugetlb_entry_hwpoisoned(entry))) {
  2810. swp_entry_t swp_entry = pte_to_swp_entry(entry);
  2811. if (is_write_migration_entry(swp_entry) && cow) {
  2812. /*
2813. * COW mappings require pages in both
2814. * parent and child to be set read-only.
  2815. */
  2816. make_migration_entry_read(&swp_entry);
  2817. entry = swp_entry_to_pte(swp_entry);
  2818. set_huge_swap_pte_at(src, addr, src_pte,
  2819. entry, sz);
  2820. }
  2821. set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
  2822. } else {
  2823. if (cow) {
  2824. /*
  2825. * No need to notify as we are downgrading page
  2826. * table protection not changing it to point
  2827. * to a new page.
  2828. *
  2829. * See Documentation/vm/mmu_notifier.txt
  2830. */
  2831. huge_ptep_set_wrprotect(src, addr, src_pte);
  2832. }
  2833. entry = huge_ptep_get(src_pte);
  2834. ptepage = pte_page(entry);
  2835. get_page(ptepage);
  2836. page_dup_rmap(ptepage, true);
  2837. set_huge_pte_at(dst, addr, dst_pte, entry);
  2838. hugetlb_count_add(pages_per_huge_page(h), dst);
  2839. }
  2840. spin_unlock(src_ptl);
  2841. spin_unlock(dst_ptl);
  2842. }
  2843. if (cow)
  2844. mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
  2845. return ret;
  2846. }
  2847. void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  2848. unsigned long start, unsigned long end,
  2849. struct page *ref_page)
  2850. {
  2851. struct mm_struct *mm = vma->vm_mm;
  2852. unsigned long address;
  2853. pte_t *ptep;
  2854. pte_t pte;
  2855. spinlock_t *ptl;
  2856. struct page *page;
  2857. struct hstate *h = hstate_vma(vma);
  2858. unsigned long sz = huge_page_size(h);
  2859. const unsigned long mmun_start = start; /* For mmu_notifiers */
  2860. const unsigned long mmun_end = end; /* For mmu_notifiers */
  2861. WARN_ON(!is_vm_hugetlb_page(vma));
  2862. BUG_ON(start & ~huge_page_mask(h));
  2863. BUG_ON(end & ~huge_page_mask(h));
  2864. /*
2865. * This is a hugetlb vma; all the pte entries should point
2866. * to huge pages.
  2867. */
  2868. tlb_remove_check_page_size_change(tlb, sz);
  2869. tlb_start_vma(tlb, vma);
  2870. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  2871. address = start;
  2872. for (; address < end; address += sz) {
  2873. ptep = huge_pte_offset(mm, address, sz);
  2874. if (!ptep)
  2875. continue;
  2876. ptl = huge_pte_lock(h, mm, ptep);
  2877. if (huge_pmd_unshare(mm, &address, ptep)) {
  2878. spin_unlock(ptl);
  2879. continue;
  2880. }
  2881. pte = huge_ptep_get(ptep);
  2882. if (huge_pte_none(pte)) {
  2883. spin_unlock(ptl);
  2884. continue;
  2885. }
  2886. /*
2887. * A migrating or HWPoisoned hugepage is already unmapped
2888. * and its refcount dropped, so just clear the pte here.
  2889. */
  2890. if (unlikely(!pte_present(pte))) {
  2891. huge_pte_clear(mm, address, ptep, sz);
  2892. spin_unlock(ptl);
  2893. continue;
  2894. }
  2895. page = pte_page(pte);
  2896. /*
  2897. * If a reference page is supplied, it is because a specific
  2898. * page is being unmapped, not a range. Ensure the page we
  2899. * are about to unmap is the actual page of interest.
  2900. */
  2901. if (ref_page) {
  2902. if (page != ref_page) {
  2903. spin_unlock(ptl);
  2904. continue;
  2905. }
  2906. /*
  2907. * Mark the VMA as having unmapped its page so that
  2908. * future faults in this VMA will fail rather than
  2909. * looking like data was lost
  2910. */
  2911. set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
  2912. }
  2913. pte = huge_ptep_get_and_clear(mm, address, ptep);
  2914. tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
  2915. if (huge_pte_dirty(pte))
  2916. set_page_dirty(page);
  2917. hugetlb_count_sub(pages_per_huge_page(h), mm);
  2918. page_remove_rmap(page, true);
  2919. spin_unlock(ptl);
  2920. tlb_remove_page_size(tlb, page, huge_page_size(h));
  2921. /*
  2922. * Bail out after unmapping reference page if supplied
  2923. */
  2924. if (ref_page)
  2925. break;
  2926. }
  2927. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  2928. tlb_end_vma(tlb, vma);
  2929. }
  2930. void __unmap_hugepage_range_final(struct mmu_gather *tlb,
  2931. struct vm_area_struct *vma, unsigned long start,
  2932. unsigned long end, struct page *ref_page)
  2933. {
  2934. __unmap_hugepage_range(tlb, vma, start, end, ref_page);
  2935. /*
  2936. * Clear this flag so that x86's huge_pmd_share page_table_shareable
  2937. * test will fail on a vma being torn down, and not grab a page table
  2938. * on its way out. We're lucky that the flag has such an appropriate
  2939. * name, and can in fact be safely cleared here. We could clear it
  2940. * before the __unmap_hugepage_range above, but all that's necessary
  2941. * is to clear it before releasing the i_mmap_rwsem. This works
  2942. * because in the context this is called, the VMA is about to be
  2943. * destroyed and the i_mmap_rwsem is held.
  2944. */
  2945. vma->vm_flags &= ~VM_MAYSHARE;
  2946. }
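/*
 * Convenience wrapper around __unmap_hugepage_range() that sets up and
 * tears down its own mmu_gather for the range.
 */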
  2947. void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  2948. unsigned long end, struct page *ref_page)
  2949. {
  2950. struct mm_struct *mm;
  2951. struct mmu_gather tlb;
  2952. mm = vma->vm_mm;
  2953. tlb_gather_mmu(&tlb, mm, start, end);
  2954. __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
  2955. tlb_finish_mmu(&tlb, start, end);
  2956. }
  2957. /*
2958. * This is called when the original mapper fails to COW a MAP_PRIVATE
2959. * mapping for which it owns the reserve page. The intention is to unmap
2960. * the page from other VMAs and let the children be SIGKILLed if they
2961. * are faulting in the same region.
  2962. */
  2963. static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  2964. struct page *page, unsigned long address)
  2965. {
  2966. struct hstate *h = hstate_vma(vma);
  2967. struct vm_area_struct *iter_vma;
  2968. struct address_space *mapping;
  2969. pgoff_t pgoff;
  2970. /*
  2971. * vm_pgoff is in PAGE_SIZE units, hence the different calculation
  2972. * from page cache lookup which is in HPAGE_SIZE units.
  2973. */
  2974. address = address & huge_page_mask(h);
  2975. pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
  2976. vma->vm_pgoff;
  2977. mapping = vma->vm_file->f_mapping;
  2978. /*
2979. * Take the mapping lock for the duration of the table walk. As
2980. * this mapping should be shared between all the VMAs,
2981. * __unmap_hugepage_range() is called directly, with the lock already held.
  2982. */
  2983. i_mmap_lock_write(mapping);
  2984. vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
  2985. /* Do not unmap the current VMA */
  2986. if (iter_vma == vma)
  2987. continue;
  2988. /*
  2989. * Shared VMAs have their own reserves and do not affect
  2990. * MAP_PRIVATE accounting but it is possible that a shared
  2991. * VMA is using the same page so check and skip such VMAs.
  2992. */
  2993. if (iter_vma->vm_flags & VM_MAYSHARE)
  2994. continue;
  2995. /*
  2996. * Unmap the page from other VMAs without their own reserves.
  2997. * They get marked to be SIGKILLed if they fault in these
  2998. * areas. This is because a future no-page fault on this VMA
  2999. * could insert a zeroed page instead of the data existing
3000. * from the time of fork(). This would look like data corruption.
  3001. */
  3002. if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
  3003. unmap_hugepage_range(iter_vma, address,
  3004. address + huge_page_size(h), page);
  3005. }
  3006. i_mmap_unlock_write(mapping);
  3007. }
  3008. /*
3009. * hugetlb_cow() should be called with the page lock of the original
3010. * hugepage held. It is called with the hugetlb_instantiation_mutex held
3011. * and pte_page locked, so we cannot race with other handlers or page migration.
3012. * Keep the pte_same checks anyway, to ease the transition away from the mutex.
  3013. */
  3014. static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
  3015. unsigned long address, pte_t *ptep,
  3016. struct page *pagecache_page, spinlock_t *ptl)
  3017. {
  3018. pte_t pte;
  3019. struct hstate *h = hstate_vma(vma);
  3020. struct page *old_page, *new_page;
  3021. int ret = 0, outside_reserve = 0;
  3022. unsigned long mmun_start; /* For mmu_notifiers */
  3023. unsigned long mmun_end; /* For mmu_notifiers */
  3024. pte = huge_ptep_get(ptep);
  3025. old_page = pte_page(pte);
  3026. retry_avoidcopy:
  3027. /* If no-one else is actually using this page, avoid the copy
  3028. * and just make the page writable */
  3029. if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
  3030. page_move_anon_rmap(old_page, vma);
  3031. set_huge_ptep_writable(vma, address, ptep);
  3032. return 0;
  3033. }
  3034. /*
  3035. * If the process that created a MAP_PRIVATE mapping is about to
  3036. * perform a COW due to a shared page count, attempt to satisfy
  3037. * the allocation without using the existing reserves. The pagecache
  3038. * page is used to determine if the reserve at this address was
  3039. * consumed or not. If reserves were used, a partial faulted mapping
  3040. * at the time of fork() could consume its reserves on COW instead
  3041. * of the full address range.
  3042. */
  3043. if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
  3044. old_page != pagecache_page)
  3045. outside_reserve = 1;
  3046. get_page(old_page);
  3047. /*
  3048. * Drop page table lock as buddy allocator may be called. It will
  3049. * be acquired again before returning to the caller, as expected.
  3050. */
  3051. spin_unlock(ptl);
  3052. new_page = alloc_huge_page(vma, address, outside_reserve);
  3053. if (IS_ERR(new_page)) {
  3054. /*
  3055. * If a process owning a MAP_PRIVATE mapping fails to COW,
  3056. * it is due to references held by a child and an insufficient
3057. * huge page pool. To guarantee the original mapper's
  3058. * reliability, unmap the page from child processes. The child
  3059. * may get SIGKILLed if it later faults.
  3060. */
  3061. if (outside_reserve) {
  3062. put_page(old_page);
  3063. BUG_ON(huge_pte_none(pte));
  3064. unmap_ref_private(mm, vma, old_page, address);
  3065. BUG_ON(huge_pte_none(pte));
  3066. spin_lock(ptl);
  3067. ptep = huge_pte_offset(mm, address & huge_page_mask(h),
  3068. huge_page_size(h));
  3069. if (likely(ptep &&
  3070. pte_same(huge_ptep_get(ptep), pte)))
  3071. goto retry_avoidcopy;
  3072. /*
  3073. * race occurs while re-acquiring page table
  3074. * lock, and our job is done.
  3075. */
  3076. return 0;
  3077. }
  3078. ret = (PTR_ERR(new_page) == -ENOMEM) ?
  3079. VM_FAULT_OOM : VM_FAULT_SIGBUS;
  3080. goto out_release_old;
  3081. }
  3082. /*
  3083. * When the original hugepage is shared one, it does not have
  3084. * anon_vma prepared.
  3085. */
  3086. if (unlikely(anon_vma_prepare(vma))) {
  3087. ret = VM_FAULT_OOM;
  3088. goto out_release_all;
  3089. }
  3090. copy_user_huge_page(new_page, old_page, address, vma,
  3091. pages_per_huge_page(h));
  3092. __SetPageUptodate(new_page);
  3093. set_page_huge_active(new_page);
  3094. mmun_start = address & huge_page_mask(h);
  3095. mmun_end = mmun_start + huge_page_size(h);
  3096. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  3097. /*
  3098. * Retake the page table lock to check for racing updates
  3099. * before the page tables are altered
  3100. */
  3101. spin_lock(ptl);
  3102. ptep = huge_pte_offset(mm, address & huge_page_mask(h),
  3103. huge_page_size(h));
  3104. if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
  3105. ClearPagePrivate(new_page);
  3106. /* Break COW */
  3107. huge_ptep_clear_flush(vma, address, ptep);
  3108. mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
  3109. set_huge_pte_at(mm, address, ptep,
  3110. make_huge_pte(vma, new_page, 1));
  3111. page_remove_rmap(old_page, true);
  3112. hugepage_add_new_anon_rmap(new_page, vma, address);
  3113. /* Make the old page be freed below */
  3114. new_page = old_page;
  3115. }
  3116. spin_unlock(ptl);
  3117. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  3118. out_release_all:
  3119. restore_reserve_on_error(h, vma, address, new_page);
  3120. put_page(new_page);
  3121. out_release_old:
  3122. put_page(old_page);
  3123. spin_lock(ptl); /* Caller expects lock to be held */
  3124. return ret;
  3125. }
  3126. /* Return the pagecache page at a given address within a VMA */
  3127. static struct page *hugetlbfs_pagecache_page(struct hstate *h,
  3128. struct vm_area_struct *vma, unsigned long address)
  3129. {
  3130. struct address_space *mapping;
  3131. pgoff_t idx;
  3132. mapping = vma->vm_file->f_mapping;
  3133. idx = vma_hugecache_offset(h, vma, address);
  3134. return find_lock_page(mapping, idx);
  3135. }
  3136. /*
3137. * Return whether there is a pagecache page to back the given address within
3138. * the VMA. Caller follow_hugetlb_page() holds page_table_lock, so we cannot lock_page().
  3139. */
  3140. static bool hugetlbfs_pagecache_present(struct hstate *h,
  3141. struct vm_area_struct *vma, unsigned long address)
  3142. {
  3143. struct address_space *mapping;
  3144. pgoff_t idx;
  3145. struct page *page;
  3146. mapping = vma->vm_file->f_mapping;
  3147. idx = vma_hugecache_offset(h, vma, address);
  3148. page = find_get_page(mapping, idx);
  3149. if (page)
  3150. put_page(page);
  3151. return page != NULL;
  3152. }
  3153. int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
  3154. pgoff_t idx)
  3155. {
  3156. struct inode *inode = mapping->host;
  3157. struct hstate *h = hstate_inode(inode);
  3158. int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
  3159. if (err)
  3160. return err;
  3161. ClearPagePrivate(page);
  3162. spin_lock(&inode->i_lock);
  3163. inode->i_blocks += blocks_per_huge_page(h);
  3164. spin_unlock(&inode->i_lock);
  3165. return 0;
  3166. }
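/*
 * Handle a fault on a hugetlb address for which no PTE exists yet:
 * either find the page in the page cache, hand the fault off to
 * userfaultfd, or allocate, zero and map a fresh huge page.
 */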
  3167. static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
  3168. struct address_space *mapping, pgoff_t idx,
  3169. unsigned long address, pte_t *ptep, unsigned int flags)
  3170. {
  3171. struct hstate *h = hstate_vma(vma);
  3172. int ret = VM_FAULT_SIGBUS;
  3173. int anon_rmap = 0;
  3174. unsigned long size;
  3175. struct page *page;
  3176. pte_t new_pte;
  3177. spinlock_t *ptl;
  3178. /*
  3179. * Currently, we are forced to kill the process in the event the
  3180. * original mapper has unmapped pages from the child due to a failed
3181. * COW. Warn that such a situation has occurred, as it may not be obvious.
  3182. */
  3183. if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
  3184. pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
  3185. current->pid);
  3186. return ret;
  3187. }
  3188. /*
  3189. * Use page lock to guard against racing truncation
  3190. * before we get page_table_lock.
  3191. */
  3192. retry:
  3193. page = find_lock_page(mapping, idx);
  3194. if (!page) {
  3195. size = i_size_read(mapping->host) >> huge_page_shift(h);
  3196. if (idx >= size)
  3197. goto out;
  3198. /*
  3199. * Check for page in userfault range
  3200. */
  3201. if (userfaultfd_missing(vma)) {
  3202. u32 hash;
  3203. struct vm_fault vmf = {
  3204. .vma = vma,
  3205. .address = address,
  3206. .flags = flags,
  3207. /*
  3208. * Hard to debug if it ends up being
  3209. * used by a callee that assumes
  3210. * something about the other
  3211. * uninitialized fields... same as in
  3212. * memory.c
  3213. */
  3214. };
  3215. /*
  3216. * hugetlb_fault_mutex must be dropped before
  3217. * handling userfault. Reacquire after handling
  3218. * fault to make calling code simpler.
  3219. */
  3220. hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping,
  3221. idx, address);
  3222. mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  3223. ret = handle_userfault(&vmf, VM_UFFD_MISSING);
  3224. mutex_lock(&hugetlb_fault_mutex_table[hash]);
  3225. goto out;
  3226. }
  3227. page = alloc_huge_page(vma, address, 0);
  3228. if (IS_ERR(page)) {
  3229. ret = PTR_ERR(page);
  3230. if (ret == -ENOMEM)
  3231. ret = VM_FAULT_OOM;
  3232. else
  3233. ret = VM_FAULT_SIGBUS;
  3234. goto out;
  3235. }
  3236. clear_huge_page(page, address, pages_per_huge_page(h));
  3237. __SetPageUptodate(page);
  3238. set_page_huge_active(page);
  3239. if (vma->vm_flags & VM_MAYSHARE) {
  3240. int err = huge_add_to_page_cache(page, mapping, idx);
  3241. if (err) {
  3242. put_page(page);
  3243. if (err == -EEXIST)
  3244. goto retry;
  3245. goto out;
  3246. }
  3247. } else {
  3248. lock_page(page);
  3249. if (unlikely(anon_vma_prepare(vma))) {
  3250. ret = VM_FAULT_OOM;
  3251. goto backout_unlocked;
  3252. }
  3253. anon_rmap = 1;
  3254. }
  3255. } else {
  3256. /*
3257. * If a memory error occurs between mmap() and fault, some processes
3258. * may not have a hwpoisoned swap entry for the errored virtual address.
3259. * So we need to block hugepage faults with a PG_hwpoison bit check.
  3260. */
  3261. if (unlikely(PageHWPoison(page))) {
  3262. ret = VM_FAULT_HWPOISON |
  3263. VM_FAULT_SET_HINDEX(hstate_index(h));
  3264. goto backout_unlocked;
  3265. }
  3266. }
  3267. /*
  3268. * If we are going to COW a private mapping later, we examine the
  3269. * pending reservations for this page now. This will ensure that
  3270. * any allocations necessary to record that reservation occur outside
  3271. * the spinlock.
  3272. */
  3273. if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
  3274. if (vma_needs_reservation(h, vma, address) < 0) {
  3275. ret = VM_FAULT_OOM;
  3276. goto backout_unlocked;
  3277. }
  3278. /* Just decrements count, does not deallocate */
  3279. vma_end_reservation(h, vma, address);
  3280. }
  3281. ptl = huge_pte_lock(h, mm, ptep);
  3282. size = i_size_read(mapping->host) >> huge_page_shift(h);
  3283. if (idx >= size)
  3284. goto backout;
  3285. ret = 0;
  3286. if (!huge_pte_none(huge_ptep_get(ptep)))
  3287. goto backout;
  3288. if (anon_rmap) {
  3289. ClearPagePrivate(page);
  3290. hugepage_add_new_anon_rmap(page, vma, address);
  3291. } else
  3292. page_dup_rmap(page, true);
  3293. new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
  3294. && (vma->vm_flags & VM_SHARED)));
  3295. set_huge_pte_at(mm, address, ptep, new_pte);
  3296. hugetlb_count_add(pages_per_huge_page(h), mm);
  3297. if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
  3298. /* Optimization, do the COW without a second fault */
  3299. ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
  3300. }
  3301. spin_unlock(ptl);
  3302. unlock_page(page);
  3303. out:
  3304. return ret;
  3305. backout:
  3306. spin_unlock(ptl);
  3307. backout_unlocked:
  3308. unlock_page(page);
  3309. restore_reserve_on_error(h, vma, address, page);
  3310. put_page(page);
  3311. goto out;
  3312. }
  3313. #ifdef CONFIG_SMP
  3314. u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
  3315. struct vm_area_struct *vma,
  3316. struct address_space *mapping,
  3317. pgoff_t idx, unsigned long address)
  3318. {
  3319. unsigned long key[2];
  3320. u32 hash;
  3321. if (vma->vm_flags & VM_SHARED) {
  3322. key[0] = (unsigned long) mapping;
  3323. key[1] = idx;
  3324. } else {
  3325. key[0] = (unsigned long) mm;
  3326. key[1] = address >> huge_page_shift(h);
  3327. }
  3328. hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
  3329. return hash & (num_fault_mutexes - 1);
  3330. }
  3331. #else
  3332. /*
3333. * For uniprocessor systems we always use a single mutex, so just
  3334. * return 0 and avoid the hashing overhead.
  3335. */
  3336. u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
  3337. struct vm_area_struct *vma,
  3338. struct address_space *mapping,
  3339. pgoff_t idx, unsigned long address)
  3340. {
  3341. return 0;
  3342. }
  3343. #endif
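/*
 * Illustrative use of the fault mutex (this is the pattern hugetlb_fault()
 * below follows):
 *
 *   hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
 *   mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *   ... handle the fault ...
 *   mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */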
  3344. int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  3345. unsigned long address, unsigned int flags)
  3346. {
  3347. pte_t *ptep, entry;
  3348. spinlock_t *ptl;
  3349. int ret;
  3350. u32 hash;
  3351. pgoff_t idx;
  3352. struct page *page = NULL;
  3353. struct page *pagecache_page = NULL;
  3354. struct hstate *h = hstate_vma(vma);
  3355. struct address_space *mapping;
  3356. int need_wait_lock = 0;
  3357. address &= huge_page_mask(h);
  3358. ptep = huge_pte_offset(mm, address, huge_page_size(h));
  3359. if (ptep) {
  3360. entry = huge_ptep_get(ptep);
  3361. if (unlikely(is_hugetlb_entry_migration(entry))) {
  3362. migration_entry_wait_huge(vma, mm, ptep);
  3363. return 0;
  3364. } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
  3365. return VM_FAULT_HWPOISON_LARGE |
  3366. VM_FAULT_SET_HINDEX(hstate_index(h));
  3367. } else {
  3368. ptep = huge_pte_alloc(mm, address, huge_page_size(h));
  3369. if (!ptep)
  3370. return VM_FAULT_OOM;
  3371. }
  3372. mapping = vma->vm_file->f_mapping;
  3373. idx = vma_hugecache_offset(h, vma, address);
  3374. /*
  3375. * Serialize hugepage allocation and instantiation, so that we don't
  3376. * get spurious allocation failures if two CPUs race to instantiate
  3377. * the same page in the page cache.
  3378. */
  3379. hash = hugetlb_fault_mutex_hash(h, mm, vma, mapping, idx, address);
  3380. mutex_lock(&hugetlb_fault_mutex_table[hash]);
  3381. entry = huge_ptep_get(ptep);
  3382. if (huge_pte_none(entry)) {
  3383. ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
  3384. goto out_mutex;
  3385. }
  3386. ret = 0;
  3387. /*
  3388. * entry could be a migration/hwpoison entry at this point, so this
3389. * check prevents the kernel from going below assuming that we have
3390. * an active hugepage in the pagecache. The goto defers handling to the
3391. * 2nd page fault, where the is_hugetlb_entry_(migration|hwpoisoned)
3392. * checks will deal with it properly.
  3393. */
  3394. if (!pte_present(entry))
  3395. goto out_mutex;
  3396. /*
  3397. * If we are going to COW the mapping later, we examine the pending
  3398. * reservations for this page now. This will ensure that any
  3399. * allocations necessary to record that reservation occur outside the
  3400. * spinlock. For private mappings, we also lookup the pagecache
  3401. * page now as it is used to determine if a reservation has been
  3402. * consumed.
  3403. */
  3404. if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
  3405. if (vma_needs_reservation(h, vma, address) < 0) {
  3406. ret = VM_FAULT_OOM;
  3407. goto out_mutex;
  3408. }
  3409. /* Just decrements count, does not deallocate */
  3410. vma_end_reservation(h, vma, address);
  3411. if (!(vma->vm_flags & VM_MAYSHARE))
  3412. pagecache_page = hugetlbfs_pagecache_page(h,
  3413. vma, address);
  3414. }
  3415. ptl = huge_pte_lock(h, mm, ptep);
  3416. /* Check for a racing update before calling hugetlb_cow */
  3417. if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
  3418. goto out_ptl;
  3419. /*
  3420. * hugetlb_cow() requires page locks of pte_page(entry) and
3421. * pagecache_page, so here we need to take the former
  3422. * when page != pagecache_page or !pagecache_page.
  3423. */
  3424. page = pte_page(entry);
  3425. if (page != pagecache_page)
  3426. if (!trylock_page(page)) {
  3427. need_wait_lock = 1;
  3428. goto out_ptl;
  3429. }
  3430. get_page(page);
  3431. if (flags & FAULT_FLAG_WRITE) {
  3432. if (!huge_pte_write(entry)) {
  3433. ret = hugetlb_cow(mm, vma, address, ptep,
  3434. pagecache_page, ptl);
  3435. goto out_put_page;
  3436. }
  3437. entry = huge_pte_mkdirty(entry);
  3438. }
  3439. entry = pte_mkyoung(entry);
  3440. if (huge_ptep_set_access_flags(vma, address, ptep, entry,
  3441. flags & FAULT_FLAG_WRITE))
  3442. update_mmu_cache(vma, address, ptep);
  3443. out_put_page:
  3444. if (page != pagecache_page)
  3445. unlock_page(page);
  3446. put_page(page);
  3447. out_ptl:
  3448. spin_unlock(ptl);
  3449. if (pagecache_page) {
  3450. unlock_page(pagecache_page);
  3451. put_page(pagecache_page);
  3452. }
  3453. out_mutex:
  3454. mutex_unlock(&hugetlb_fault_mutex_table[hash]);
  3455. /*
3456. * Generally it's safe to hold a refcount while waiting for a page lock.
3457. * But here we only wait to defer the next page fault and avoid a busy
3458. * loop; the page is not used once unlocked before we return from the
3459. * current page fault. So we are safe from accessing a freed page, even
3460. * though we wait here without taking a refcount.
  3461. */
  3462. if (need_wait_lock)
  3463. wait_on_page_locked(page);
  3464. return ret;
  3465. }
  3466. /*
  3467. * Used by userfaultfd UFFDIO_COPY. Based on mcopy_atomic_pte with
  3468. * modifications for huge pages.
  3469. */
  3470. int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
  3471. pte_t *dst_pte,
  3472. struct vm_area_struct *dst_vma,
  3473. unsigned long dst_addr,
  3474. unsigned long src_addr,
  3475. struct page **pagep)
  3476. {
  3477. struct address_space *mapping;
  3478. pgoff_t idx;
  3479. unsigned long size;
  3480. int vm_shared = dst_vma->vm_flags & VM_SHARED;
  3481. struct hstate *h = hstate_vma(dst_vma);
  3482. pte_t _dst_pte;
  3483. spinlock_t *ptl;
  3484. int ret;
  3485. struct page *page;
  3486. if (!*pagep) {
  3487. ret = -ENOMEM;
  3488. page = alloc_huge_page(dst_vma, dst_addr, 0);
  3489. if (IS_ERR(page))
  3490. goto out;
  3491. ret = copy_huge_page_from_user(page,
  3492. (const void __user *) src_addr,
  3493. pages_per_huge_page(h), false);
  3494. /* fallback to copy_from_user outside mmap_sem */
  3495. if (unlikely(ret)) {
  3496. ret = -EFAULT;
  3497. *pagep = page;
  3498. /* don't free the page */
  3499. goto out;
  3500. }
  3501. } else {
  3502. page = *pagep;
  3503. *pagep = NULL;
  3504. }
  3505. /*
  3506. * The memory barrier inside __SetPageUptodate makes sure that
  3507. * preceding stores to the page contents become visible before
  3508. * the set_pte_at() write.
  3509. */
  3510. __SetPageUptodate(page);
  3511. set_page_huge_active(page);
  3512. mapping = dst_vma->vm_file->f_mapping;
  3513. idx = vma_hugecache_offset(h, dst_vma, dst_addr);
  3514. /*
  3515. * If shared, add to page cache
  3516. */
  3517. if (vm_shared) {
  3518. size = i_size_read(mapping->host) >> huge_page_shift(h);
  3519. ret = -EFAULT;
  3520. if (idx >= size)
  3521. goto out_release_nounlock;
  3522. /*
  3523. * Serialization between remove_inode_hugepages() and
  3524. * huge_add_to_page_cache() below happens through the
3525. * hugetlb_fault_mutex_table, which here must be held by
  3526. * the caller.
  3527. */
  3528. ret = huge_add_to_page_cache(page, mapping, idx);
  3529. if (ret)
  3530. goto out_release_nounlock;
  3531. }
  3532. ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
  3533. spin_lock(ptl);
  3534. /*
  3535. * Recheck the i_size after holding PT lock to make sure not
  3536. * to leave any page mapped (as page_mapped()) beyond the end
  3537. * of the i_size (remove_inode_hugepages() is strict about
  3538. * enforcing that). If we bail out here, we'll also leave a
  3539. * page in the radix tree in the vm_shared case beyond the end
  3540. * of the i_size, but remove_inode_hugepages() will take care
  3541. * of it as soon as we drop the hugetlb_fault_mutex_table.
  3542. */
  3543. size = i_size_read(mapping->host) >> huge_page_shift(h);
  3544. ret = -EFAULT;
  3545. if (idx >= size)
  3546. goto out_release_unlock;
  3547. ret = -EEXIST;
  3548. if (!huge_pte_none(huge_ptep_get(dst_pte)))
  3549. goto out_release_unlock;
  3550. if (vm_shared) {
  3551. page_dup_rmap(page, true);
  3552. } else {
  3553. ClearPagePrivate(page);
  3554. hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
  3555. }
  3556. _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
  3557. if (dst_vma->vm_flags & VM_WRITE)
  3558. _dst_pte = huge_pte_mkdirty(_dst_pte);
  3559. _dst_pte = pte_mkyoung(_dst_pte);
  3560. set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
  3561. (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
  3562. dst_vma->vm_flags & VM_WRITE);
  3563. hugetlb_count_add(pages_per_huge_page(h), dst_mm);
  3564. /* No need to invalidate - it was non-present before */
  3565. update_mmu_cache(dst_vma, dst_addr, dst_pte);
  3566. spin_unlock(ptl);
  3567. if (vm_shared)
  3568. unlock_page(page);
  3569. ret = 0;
  3570. out:
  3571. return ret;
  3572. out_release_unlock:
  3573. spin_unlock(ptl);
  3574. if (vm_shared)
  3575. unlock_page(page);
  3576. out_release_nounlock:
  3577. put_page(page);
  3578. goto out;
  3579. }
  3580. long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
  3581. struct page **pages, struct vm_area_struct **vmas,
  3582. unsigned long *position, unsigned long *nr_pages,
  3583. long i, unsigned int flags, int *nonblocking)
  3584. {
  3585. unsigned long pfn_offset;
  3586. unsigned long vaddr = *position;
  3587. unsigned long remainder = *nr_pages;
  3588. struct hstate *h = hstate_vma(vma);
  3589. int err = -EFAULT;
  3590. while (vaddr < vma->vm_end && remainder) {
  3591. pte_t *pte;
  3592. spinlock_t *ptl = NULL;
  3593. int absent;
  3594. struct page *page;
  3595. /*
  3596. * If we have a pending SIGKILL, don't keep faulting pages and
  3597. * potentially allocating memory.
  3598. */
  3599. if (unlikely(fatal_signal_pending(current))) {
  3600. remainder = 0;
  3601. break;
  3602. }
  3603. /*
3604. * Some archs (sparc64, sh*) have multiple pte_t entries for
  3605. * each hugepage. We have to make sure we get the
  3606. * first, for the page indexing below to work.
  3607. *
  3608. * Note that page table lock is not held when pte is null.
  3609. */
  3610. pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
  3611. huge_page_size(h));
  3612. if (pte)
  3613. ptl = huge_pte_lock(h, mm, pte);
  3614. absent = !pte || huge_pte_none(huge_ptep_get(pte));
  3615. /*
  3616. * When coredumping, it suits get_dump_page if we just return
  3617. * an error where there's an empty slot with no huge pagecache
  3618. * to back it. This way, we avoid allocating a hugepage, and
  3619. * the sparse dumpfile avoids allocating disk blocks, but its
  3620. * huge holes still show up with zeroes where they need to be.
  3621. */
  3622. if (absent && (flags & FOLL_DUMP) &&
  3623. !hugetlbfs_pagecache_present(h, vma, vaddr)) {
  3624. if (pte)
  3625. spin_unlock(ptl);
  3626. remainder = 0;
  3627. break;
  3628. }
  3629. /*
3630. * We need to call hugetlb_fault for both hugepages under migration
3631. * (in which case hugetlb_fault waits for the migration) and
3632. * hwpoisoned hugepages (in which case we need to prevent the
3633. * caller from accessing them). In order to do this, we use
3634. * is_swap_pte here instead of is_hugetlb_entry_migration and
3635. * is_hugetlb_entry_hwpoisoned: it simply covers both cases,
3636. * and we can't follow correct pages directly from any kind
3637. * of swap entry anyway.
  3638. */
  3639. if (absent || is_swap_pte(huge_ptep_get(pte)) ||
  3640. ((flags & FOLL_WRITE) &&
  3641. !huge_pte_write(huge_ptep_get(pte)))) {
  3642. int ret;
  3643. unsigned int fault_flags = 0;
  3644. if (pte)
  3645. spin_unlock(ptl);
  3646. if (flags & FOLL_WRITE)
  3647. fault_flags |= FAULT_FLAG_WRITE;
  3648. if (nonblocking)
  3649. fault_flags |= FAULT_FLAG_ALLOW_RETRY;
  3650. if (flags & FOLL_NOWAIT)
  3651. fault_flags |= FAULT_FLAG_ALLOW_RETRY |
  3652. FAULT_FLAG_RETRY_NOWAIT;
  3653. if (flags & FOLL_TRIED) {
  3654. VM_WARN_ON_ONCE(fault_flags &
  3655. FAULT_FLAG_ALLOW_RETRY);
  3656. fault_flags |= FAULT_FLAG_TRIED;
  3657. }
  3658. ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
  3659. if (ret & VM_FAULT_ERROR) {
  3660. err = vm_fault_to_errno(ret, flags);
  3661. remainder = 0;
  3662. break;
  3663. }
  3664. if (ret & VM_FAULT_RETRY) {
  3665. if (nonblocking)
  3666. *nonblocking = 0;
  3667. *nr_pages = 0;
  3668. /*
  3669. * VM_FAULT_RETRY must not return an
  3670. * error, it will return zero
  3671. * instead.
  3672. *
  3673. * No need to update "position" as the
  3674. * caller will not check it after
  3675. * *nr_pages is set to 0.
  3676. */
  3677. return i;
  3678. }
  3679. continue;
  3680. }
  3681. pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
  3682. page = pte_page(huge_ptep_get(pte));
  3683. same_page:
  3684. if (pages) {
  3685. pages[i] = mem_map_offset(page, pfn_offset);
  3686. get_page(pages[i]);
  3687. }
  3688. if (vmas)
  3689. vmas[i] = vma;
  3690. vaddr += PAGE_SIZE;
  3691. ++pfn_offset;
  3692. --remainder;
  3693. ++i;
  3694. if (vaddr < vma->vm_end && remainder &&
  3695. pfn_offset < pages_per_huge_page(h)) {
  3696. /*
  3697. * We use pfn_offset to avoid touching the pageframes
  3698. * of this compound page.
  3699. */
  3700. goto same_page;
  3701. }
  3702. spin_unlock(ptl);
  3703. }
  3704. *nr_pages = remainder;
  3705. /*
3706. * Setting position is actually required only if remainder is
3707. * not zero, but it's faster not to add an "if (remainder)"
  3708. * branch.
  3709. */
  3710. *position = vaddr;
  3711. return i ? i : err;
  3712. }
  3713. #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
  3714. /*
3715. * Architectures with special requirements for evicting hugetlb-backing
3716. * TLB entries can implement this; otherwise flush_tlb_range() is used.
  3717. */
  3718. #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
  3719. #endif
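/*
 * Apply @newprot to the huge PTEs mapping [address, end). Returns the
 * number of base (PAGE_SIZE) pages whose protection was changed, i.e.
 * the count of huge pages touched shifted by the hstate order.
 */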
  3720. unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
  3721. unsigned long address, unsigned long end, pgprot_t newprot)
  3722. {
  3723. struct mm_struct *mm = vma->vm_mm;
  3724. unsigned long start = address;
  3725. pte_t *ptep;
  3726. pte_t pte;
  3727. struct hstate *h = hstate_vma(vma);
  3728. unsigned long pages = 0;
  3729. BUG_ON(address >= end);
  3730. flush_cache_range(vma, address, end);
  3731. mmu_notifier_invalidate_range_start(mm, start, end);
  3732. i_mmap_lock_write(vma->vm_file->f_mapping);
  3733. for (; address < end; address += huge_page_size(h)) {
  3734. spinlock_t *ptl;
  3735. ptep = huge_pte_offset(mm, address, huge_page_size(h));
  3736. if (!ptep)
  3737. continue;
  3738. ptl = huge_pte_lock(h, mm, ptep);
  3739. if (huge_pmd_unshare(mm, &address, ptep)) {
  3740. pages++;
  3741. spin_unlock(ptl);
  3742. continue;
  3743. }
  3744. pte = huge_ptep_get(ptep);
  3745. if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
  3746. spin_unlock(ptl);
  3747. continue;
  3748. }
  3749. if (unlikely(is_hugetlb_entry_migration(pte))) {
  3750. swp_entry_t entry = pte_to_swp_entry(pte);
  3751. if (is_write_migration_entry(entry)) {
  3752. pte_t newpte;
  3753. make_migration_entry_read(&entry);
  3754. newpte = swp_entry_to_pte(entry);
  3755. set_huge_swap_pte_at(mm, address, ptep,
  3756. newpte, huge_page_size(h));
  3757. pages++;
  3758. }
  3759. spin_unlock(ptl);
  3760. continue;
  3761. }
  3762. if (!huge_pte_none(pte)) {
  3763. pte = huge_ptep_get_and_clear(mm, address, ptep);
  3764. pte = pte_mkhuge(huge_pte_modify(pte, newprot));
  3765. pte = arch_make_huge_pte(pte, vma, NULL, 0);
  3766. set_huge_pte_at(mm, address, ptep, pte);
  3767. pages++;
  3768. }
  3769. spin_unlock(ptl);
  3770. }
  3771. /*
  3772. * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
  3773. * may have cleared our pud entry and done put_page on the page table:
  3774. * once we release i_mmap_rwsem, another task can do the final put_page
  3775. * and that page table be reused and filled with junk.
  3776. */
  3777. flush_hugetlb_tlb_range(vma, start, end);
  3778. /*
  3779. * No need to call mmu_notifier_invalidate_range() we are downgrading
  3780. * page table protection not changing it to point to a new page.
  3781. *
  3782. * See Documentation/vm/mmu_notifier.txt
  3783. */
  3784. i_mmap_unlock_write(vma->vm_file->f_mapping);
  3785. mmu_notifier_invalidate_range_end(mm, start, end);
  3786. return pages << h->order;
  3787. }
  3788. int hugetlb_reserve_pages(struct inode *inode,
  3789. long from, long to,
  3790. struct vm_area_struct *vma,
  3791. vm_flags_t vm_flags)
  3792. {
  3793. long ret, chg;
  3794. struct hstate *h = hstate_inode(inode);
  3795. struct hugepage_subpool *spool = subpool_inode(inode);
  3796. struct resv_map *resv_map;
  3797. long gbl_reserve;
  3798. /*
  3799. * Only apply hugepage reservation if asked. At fault time, an
  3800. * attempt will be made for VM_NORESERVE to allocate a page
3801. * without using reserves.
  3802. */
  3803. if (vm_flags & VM_NORESERVE)
  3804. return 0;
  3805. /*
  3806. * Shared mappings base their reservation on the number of pages that
  3807. * are already allocated on behalf of the file. Private mappings need
  3808. * to reserve the full area even if read-only as mprotect() may be
  3809. * called to make the mapping read-write. Assume !vma is a shm mapping
  3810. */
  3811. if (!vma || vma->vm_flags & VM_MAYSHARE) {
  3812. resv_map = inode_resv_map(inode);
  3813. chg = region_chg(resv_map, from, to);
  3814. } else {
  3815. resv_map = resv_map_alloc();
  3816. if (!resv_map)
  3817. return -ENOMEM;
  3818. chg = to - from;
  3819. set_vma_resv_map(vma, resv_map);
  3820. set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
  3821. }
  3822. if (chg < 0) {
  3823. ret = chg;
  3824. goto out_err;
  3825. }
  3826. /*
  3827. * There must be enough pages in the subpool for the mapping. If
  3828. * the subpool has a minimum size, there may be some global
  3829. * reservations already in place (gbl_reserve).
  3830. */
  3831. gbl_reserve = hugepage_subpool_get_pages(spool, chg);
  3832. if (gbl_reserve < 0) {
  3833. ret = -ENOSPC;
  3834. goto out_err;
  3835. }
  3836. /*
3837. * Check that enough hugepages are available for the reservation.
3838. * Hand the pages back to the subpool if there are not.
  3839. */
  3840. ret = hugetlb_acct_memory(h, gbl_reserve);
  3841. if (ret < 0) {
  3842. /* put back original number of pages, chg */
  3843. (void)hugepage_subpool_put_pages(spool, chg);
  3844. goto out_err;
  3845. }
  3846. /*
  3847. * Account for the reservations made. Shared mappings record regions
  3848. * that have reservations as they are shared by multiple VMAs.
  3849. * When the last VMA disappears, the region map says how much
  3850. * the reservation was and the page cache tells how much of
  3851. * the reservation was consumed. Private mappings are per-VMA and
  3852. * only the consumed reservations are tracked. When the VMA
  3853. * disappears, the original reservation is the VMA size and the
  3854. * consumed reservations are stored in the map. Hence, nothing
  3855. * else has to be done for private mappings here
  3856. */
  3857. if (!vma || vma->vm_flags & VM_MAYSHARE) {
  3858. long add = region_add(resv_map, from, to);
  3859. if (unlikely(chg > add)) {
  3860. /*
  3861. * pages in this range were added to the reserve
  3862. * map between region_chg and region_add. This
  3863. * indicates a race with alloc_huge_page. Adjust
  3864. * the subpool and reserve counts modified above
  3865. * based on the difference.
  3866. */
  3867. long rsv_adjust;
  3868. rsv_adjust = hugepage_subpool_put_pages(spool,
  3869. chg - add);
  3870. hugetlb_acct_memory(h, -rsv_adjust);
  3871. }
  3872. }
  3873. return 0;
  3874. out_err:
  3875. if (!vma || vma->vm_flags & VM_MAYSHARE)
  3876. /* Don't call region_abort if region_chg failed */
  3877. if (chg >= 0)
  3878. region_abort(resv_map, from, to);
  3879. if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
  3880. kref_put(&resv_map->refs, resv_map_release);
  3881. return ret;
  3882. }
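/*
 * Release reservations for [start, end) when pages are removed from an
 * inode, e.g. on truncate or hole punch. @freed is the number of huge
 * pages actually freed, so (chg - freed) outstanding reservations are
 * returned to the subpool and, via hugetlb_acct_memory(), to the global pool.
 */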
  3883. long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
  3884. long freed)
  3885. {
  3886. struct hstate *h = hstate_inode(inode);
  3887. struct resv_map *resv_map = inode_resv_map(inode);
  3888. long chg = 0;
  3889. struct hugepage_subpool *spool = subpool_inode(inode);
  3890. long gbl_reserve;
  3891. if (resv_map) {
  3892. chg = region_del(resv_map, start, end);
  3893. /*
  3894. * region_del() can fail in the rare case where a region
3895. * must be split and another region descriptor cannot be
  3896. * allocated. If end == LONG_MAX, it will not fail.
  3897. */
  3898. if (chg < 0)
  3899. return chg;
  3900. }
  3901. spin_lock(&inode->i_lock);
  3902. inode->i_blocks -= (blocks_per_huge_page(h) * freed);
  3903. spin_unlock(&inode->i_lock);
  3904. /*
  3905. * If the subpool has a minimum size, the number of global
  3906. * reservations to be released may be adjusted.
  3907. */
  3908. gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
  3909. hugetlb_acct_memory(h, -gbl_reserve);
  3910. return 0;
  3911. }
  3912. #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
  3913. static unsigned long page_table_shareable(struct vm_area_struct *svma,
  3914. struct vm_area_struct *vma,
  3915. unsigned long addr, pgoff_t idx)
  3916. {
  3917. unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
  3918. svma->vm_start;
  3919. unsigned long sbase = saddr & PUD_MASK;
  3920. unsigned long s_end = sbase + PUD_SIZE;
  3921. /* Allow segments to share if only one is marked locked */
  3922. unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
  3923. unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
  3924. /*
3925. * Match the virtual addresses, permissions and the alignment of the
  3926. * page table page.
  3927. */
  3928. if (pmd_index(addr) != pmd_index(saddr) ||
  3929. vm_flags != svm_flags ||
  3930. sbase < svma->vm_start || svma->vm_end < s_end)
  3931. return 0;
  3932. return saddr;
  3933. }
  3934. static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
  3935. {
  3936. unsigned long base = addr & PUD_MASK;
  3937. unsigned long end = base + PUD_SIZE;
  3938. /*
  3939. * check on proper vm_flags and page table alignment
  3940. */
  3941. if (vma->vm_flags & VM_MAYSHARE &&
  3942. vma->vm_start <= base && end <= vma->vm_end)
  3943. return true;
  3944. return false;
  3945. }
  3946. /*
  3947. * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
  3948. * and returns the corresponding pte. While this is not necessary for the
  3949. * !shared pmd case because we can allocate the pmd later as well, it makes the
  3950. * code much cleaner. pmd allocation is essential for the shared case because
  3951. * pud has to be populated inside the same i_mmap_rwsem section - otherwise
  3952. * racing tasks could either miss the sharing (see huge_pte_offset) or select a
  3953. * bad pmd for sharing.
  3954. */
  3955. pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
  3956. {
  3957. struct vm_area_struct *vma = find_vma(mm, addr);
  3958. struct address_space *mapping = vma->vm_file->f_mapping;
  3959. pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
  3960. vma->vm_pgoff;
  3961. struct vm_area_struct *svma;
  3962. unsigned long saddr;
  3963. pte_t *spte = NULL;
  3964. pte_t *pte;
  3965. spinlock_t *ptl;
  3966. if (!vma_shareable(vma, addr))
  3967. return (pte_t *)pmd_alloc(mm, pud, addr);
  3968. i_mmap_lock_write(mapping);
  3969. vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
  3970. if (svma == vma)
  3971. continue;
  3972. saddr = page_table_shareable(svma, vma, addr, idx);
  3973. if (saddr) {
  3974. spte = huge_pte_offset(svma->vm_mm, saddr,
  3975. vma_mmu_pagesize(svma));
  3976. if (spte) {
  3977. get_page(virt_to_page(spte));
  3978. break;
  3979. }
  3980. }
  3981. }
  3982. if (!spte)
  3983. goto out;
  3984. ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
  3985. if (pud_none(*pud)) {
  3986. pud_populate(mm, pud,
  3987. (pmd_t *)((unsigned long)spte & PAGE_MASK));
  3988. mm_inc_nr_pmds(mm);
  3989. } else {
  3990. put_page(virt_to_page(spte));
  3991. }
  3992. spin_unlock(ptl);
  3993. out:
  3994. pte = (pte_t *)pmd_alloc(mm, pud, addr);
  3995. i_mmap_unlock_write(mapping);
  3996. return pte;
  3997. }
  3998. /*
  3999. * unmap huge page backed by shared pte.
  4000. *
  4001. * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
  4002. * indicated by page_count > 1, unmap is achieved by clearing pud and
  4003. * decrementing the ref count. If count == 1, the pte page is not shared.
  4004. *
  4005. * called with page table lock held.
  4006. *
  4007. * returns: 1 successfully unmapped a shared pte page
  4008. * 0 the underlying pte page is not shared, or it is the last user
  4009. */
  4010. int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
  4011. {
  4012. pgd_t *pgd = pgd_offset(mm, *addr);
  4013. p4d_t *p4d = p4d_offset(pgd, *addr);
  4014. pud_t *pud = pud_offset(p4d, *addr);
  4015. BUG_ON(page_count(virt_to_page(ptep)) == 0);
  4016. if (page_count(virt_to_page(ptep)) == 1)
  4017. return 0;
  4018. pud_clear(pud);
  4019. put_page(virt_to_page(ptep));
  4020. mm_dec_nr_pmds(mm);
  4021. *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
  4022. return 1;
  4023. }
  4024. #define want_pmd_share() (1)
  4025. #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
  4026. pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
  4027. {
  4028. return NULL;
  4029. }
  4030. int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
  4031. {
  4032. return 0;
  4033. }
  4034. #define want_pmd_share() (0)
  4035. #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
  4036. #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
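/*
 * Allocate the page table levels needed to map a huge page of size @sz
 * at @addr: a PUD-sized page is mapped at the PUD level, a PMD-sized
 * page at the PMD level (sharing the PMD page with other processes
 * where possible).
 */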
  4037. pte_t *huge_pte_alloc(struct mm_struct *mm,
  4038. unsigned long addr, unsigned long sz)
  4039. {
  4040. pgd_t *pgd;
  4041. p4d_t *p4d;
  4042. pud_t *pud;
  4043. pte_t *pte = NULL;
  4044. pgd = pgd_offset(mm, addr);
  4045. p4d = p4d_alloc(mm, pgd, addr);
  4046. if (!p4d)
  4047. return NULL;
  4048. pud = pud_alloc(mm, p4d, addr);
  4049. if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share() && pud_none(*pud))
				pte = huge_pmd_share(mm, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
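	/*
	 * Whatever is already mapped at @addr must itself be a huge
	 * entry; finding anything else here means the page tables are
	 * corrupted.
	 */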
	BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));

	return pte;
}

/*
 * huge_pte_offset() - Walk the page table to resolve the hugepage
 * entry at address @addr
 *
 * Return: Pointer to page table or swap entry (PUD or PMD) for
 * address @addr, or NULL if a p*d_none() entry is encountered and the
 * size @sz doesn't match the hugepage size at this level of the page
 * table.
 */
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (!pgd_present(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (!p4d_present(*p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
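	/*
	 * A none entry is only meaningful when @sz matches this level;
	 * for a smaller page size there is nothing left to descend
	 * into, so report NULL instead.
	 */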
	if (sz != PUD_SIZE && pud_none(*pud))
		return NULL;
	/* hugepage or swap? */
	if (pud_huge(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, addr);
	if (sz != PMD_SIZE && pmd_none(*pmd))
		return NULL;
	/* hugepage or swap? */
	if (pmd_huge(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	return NULL;
}
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */

/*
 * These functions are overridable if your architecture needs its own
 * behavior.
 */
struct page * __weak
follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write)
{
	return ERR_PTR(-EINVAL);
}

struct page * __weak
follow_huge_pd(struct vm_area_struct *vma,
	       unsigned long address, hugepd_t hpd, int flags, int pdshift)
{
	WARN(1, "hugepd follow called with no support for hugepage directory format\n");
	return NULL;
}

struct page * __weak
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int flags)
{
	struct page *page = NULL;
	spinlock_t *ptl;
	pte_t pte;
retry:
	ptl = pmd_lockptr(mm, pmd);
	spin_lock(ptl);
	/*
	 * make sure that the address range covered by this pmd is not
	 * unmapped by other threads.
	 */
	if (!pmd_huge(*pmd))
		goto out;
	pte = huge_ptep_get((pte_t *)pmd);
	if (pte_present(pte)) {
		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
		if (flags & FOLL_GET)
			get_page(page);
	} else {
		if (is_hugetlb_entry_migration(pte)) {
			spin_unlock(ptl);
			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
			goto retry;
		}
		/*
		 * hwpoisoned entry is treated as no_page_table in
		 * follow_page_mask().
		 */
	}
out:
	spin_unlock(ptl);
	return page;
}
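
/*
 * The generic versions of follow_huge_pud() and follow_huge_pgd() below
 * cannot take a page reference, so they bail out on FOLL_GET; an
 * architecture that needs that has to override these __weak helpers.
 */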
struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
}

struct page * __weak
follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
{
	if (flags & FOLL_GET)
		return NULL;

	return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
}
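
/*
 * Isolate an active huge page for migration: take a reference and move
 * it from its hstate's active list onto the caller's @list.  Fails if
 * the page is not active or its refcount has already dropped to zero.
 */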
bool isolate_huge_page(struct page *page, struct list_head *list)
{
	bool ret = true;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	spin_lock(&hugetlb_lock);
	if (!page_huge_active(page) || !get_page_unless_zero(page)) {
		ret = false;
		goto unlock;
	}
	clear_page_huge_active(page);
	list_move_tail(&page->lru, list);
unlock:
	spin_unlock(&hugetlb_lock);
	return ret;
}
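
/*
 * Undo isolate_huge_page(): mark the page active again, return it to
 * its hstate's active list and drop the reference taken at isolation
 * time.
 */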
void putback_active_hugepage(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	spin_lock(&hugetlb_lock);
	set_page_huge_active(page);
	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	put_page(page);
}