hugetlb.c

  1. /*
  2. * Generic hugetlb support.
  3. * (C) Nadia Yvette Chambers, April 2004
  4. */
  5. #include <linux/list.h>
  6. #include <linux/init.h>
  7. #include <linux/module.h>
  8. #include <linux/mm.h>
  9. #include <linux/seq_file.h>
  10. #include <linux/sysctl.h>
  11. #include <linux/highmem.h>
  12. #include <linux/mmu_notifier.h>
  13. #include <linux/nodemask.h>
  14. #include <linux/pagemap.h>
  15. #include <linux/mempolicy.h>
  16. #include <linux/compiler.h>
  17. #include <linux/cpuset.h>
  18. #include <linux/mutex.h>
  19. #include <linux/bootmem.h>
  20. #include <linux/sysfs.h>
  21. #include <linux/slab.h>
  22. #include <linux/rmap.h>
  23. #include <linux/swap.h>
  24. #include <linux/swapops.h>
  25. #include <linux/page-isolation.h>
  26. #include <linux/jhash.h>
  27. #include <asm/page.h>
  28. #include <asm/pgtable.h>
  29. #include <asm/tlb.h>
  30. #include <linux/io.h>
  31. #include <linux/hugetlb.h>
  32. #include <linux/hugetlb_cgroup.h>
  33. #include <linux/node.h>
  34. #include "internal.h"
  35. unsigned long hugepages_treat_as_movable;
  36. int hugetlb_max_hstate __read_mostly;
  37. unsigned int default_hstate_idx;
  38. struct hstate hstates[HUGE_MAX_HSTATE];
  39. __initdata LIST_HEAD(huge_boot_pages);
  40. /* for command line parsing */
  41. static struct hstate * __initdata parsed_hstate;
  42. static unsigned long __initdata default_hstate_max_huge_pages;
  43. static unsigned long __initdata default_hstate_size;
  44. /*
  45. * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
  46. * free_huge_pages, and surplus_huge_pages.
  47. */
  48. DEFINE_SPINLOCK(hugetlb_lock);
  49. /*
  50. * Serializes faults on the same logical page. This is used to
  51. * prevent spurious OOMs when the hugepage pool is fully utilized.
  52. */
  53. static int num_fault_mutexes;
  54. static struct mutex *htlb_fault_mutex_table ____cacheline_aligned_in_smp;
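/*
 * An illustrative sketch of how a fault path is expected to use this table
 * (the real hash helper is not shown in this excerpt; some_fault_hash() and
 * idx below are placeholder names): hash the faulting mapping and page index
 * to an index below num_fault_mutexes and serialize on that mutex.
 *
 *	idx = some_fault_hash(mapping, pgoff) % num_fault_mutexes;
 *	mutex_lock(&htlb_fault_mutex_table[idx]);
 *	... handle the fault ...
 *	mutex_unlock(&htlb_fault_mutex_table[idx]);
 */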
  55. static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
  56. {
  57. bool free = (spool->count == 0) && (spool->used_hpages == 0);
  58. spin_unlock(&spool->lock);
  59. /* If no pages are used, and no other handles to the subpool
  60. * remain, free the subpool. */
  61. if (free)
  62. kfree(spool);
  63. }
  64. struct hugepage_subpool *hugepage_new_subpool(long nr_blocks)
  65. {
  66. struct hugepage_subpool *spool;
  67. spool = kmalloc(sizeof(*spool), GFP_KERNEL);
  68. if (!spool)
  69. return NULL;
  70. spin_lock_init(&spool->lock);
  71. spool->count = 1;
  72. spool->max_hpages = nr_blocks;
  73. spool->used_hpages = 0;
  74. return spool;
  75. }
  76. void hugepage_put_subpool(struct hugepage_subpool *spool)
  77. {
  78. spin_lock(&spool->lock);
  79. BUG_ON(!spool->count);
  80. spool->count--;
  81. unlock_or_release_subpool(spool);
  82. }
  83. static int hugepage_subpool_get_pages(struct hugepage_subpool *spool,
  84. long delta)
  85. {
  86. int ret = 0;
  87. if (!spool)
  88. return 0;
  89. spin_lock(&spool->lock);
  90. if ((spool->used_hpages + delta) <= spool->max_hpages) {
  91. spool->used_hpages += delta;
  92. } else {
  93. ret = -ENOMEM;
  94. }
  95. spin_unlock(&spool->lock);
  96. return ret;
  97. }
  98. static void hugepage_subpool_put_pages(struct hugepage_subpool *spool,
  99. long delta)
  100. {
  101. if (!spool)
  102. return;
  103. spin_lock(&spool->lock);
  104. spool->used_hpages -= delta;
  105. /* If hugetlbfs_put_super couldn't free spool due to
  106. * an outstanding quota reference, free it now. */
  107. unlock_or_release_subpool(spool);
  108. }
  109. static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
  110. {
  111. return HUGETLBFS_SB(inode->i_sb)->spool;
  112. }
  113. static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
  114. {
  115. return subpool_inode(file_inode(vma->vm_file));
  116. }
  117. /*
  118. * Region tracking -- allows tracking of reservations and instantiated pages
  119. * across the pages in a mapping.
  120. *
  121. * The region data structures are embedded into a resv_map and
  122. * protected by a resv_map's lock
  123. */
  124. struct file_region {
  125. struct list_head link;
  126. long from;
  127. long to;
  128. };
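/*
 * A worked example of the semantics implemented by region_chg()/region_add()
 * below: starting from an empty map, region_chg(resv, 0, 2) returns a charge
 * of 2 and region_add(resv, 0, 2) records the region [0, 2).  A subsequent
 * region_chg(resv, 1, 3) overlaps that region, so it returns a charge of
 * only 1, and region_add(resv, 1, 3) merges the ranges into a single
 * region [0, 3).
 */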
  129. static long region_add(struct resv_map *resv, long f, long t)
  130. {
  131. struct list_head *head = &resv->regions;
  132. struct file_region *rg, *nrg, *trg;
  133. spin_lock(&resv->lock);
  134. /* Locate the region we are either in or before. */
  135. list_for_each_entry(rg, head, link)
  136. if (f <= rg->to)
  137. break;
  138. /* Round our left edge to the current segment if it encloses us. */
  139. if (f > rg->from)
  140. f = rg->from;
  141. /* Check for and consume any regions we now overlap with. */
  142. nrg = rg;
  143. list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
  144. if (&rg->link == head)
  145. break;
  146. if (rg->from > t)
  147. break;
  148. /* If this area reaches higher, extend our area to
  149. * include it completely. If this is not the first area
  150. * which we intend to reuse, free it. */
  151. if (rg->to > t)
  152. t = rg->to;
  153. if (rg != nrg) {
  154. list_del(&rg->link);
  155. kfree(rg);
  156. }
  157. }
  158. nrg->from = f;
  159. nrg->to = t;
  160. spin_unlock(&resv->lock);
  161. return 0;
  162. }
  163. static long region_chg(struct resv_map *resv, long f, long t)
  164. {
  165. struct list_head *head = &resv->regions;
  166. struct file_region *rg, *nrg = NULL;
  167. long chg = 0;
  168. retry:
  169. spin_lock(&resv->lock);
  170. /* Locate the region we are before or in. */
  171. list_for_each_entry(rg, head, link)
  172. if (f <= rg->to)
  173. break;
  174. /* If we are below the current region then a new region is required.
  175. * Subtle: allocate a new region at the position, but make it zero
  176. * size so that we can guarantee to record the reservation. */
  177. if (&rg->link == head || t < rg->from) {
  178. if (!nrg) {
  179. spin_unlock(&resv->lock);
  180. nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
  181. if (!nrg)
  182. return -ENOMEM;
  183. nrg->from = f;
  184. nrg->to = f;
  185. INIT_LIST_HEAD(&nrg->link);
  186. goto retry;
  187. }
  188. list_add(&nrg->link, rg->link.prev);
  189. chg = t - f;
  190. goto out_nrg;
  191. }
  192. /* Round our left edge to the current segment if it encloses us. */
  193. if (f > rg->from)
  194. f = rg->from;
  195. chg = t - f;
  196. /* Check for and consume any regions we now overlap with. */
  197. list_for_each_entry(rg, rg->link.prev, link) {
  198. if (&rg->link == head)
  199. break;
  200. if (rg->from > t)
  201. goto out;
  202. /* We overlap with this area, if it extends further than
  203. * us then we must extend ourselves. Account for its
  204. * existing reservation. */
  205. if (rg->to > t) {
  206. chg += rg->to - t;
  207. t = rg->to;
  208. }
  209. chg -= rg->to - rg->from;
  210. }
  211. out:
  212. spin_unlock(&resv->lock);
  213. /* We already know we raced and no longer need the new region */
  214. kfree(nrg);
  215. return chg;
  216. out_nrg:
  217. spin_unlock(&resv->lock);
  218. return chg;
  219. }
  220. static long region_truncate(struct resv_map *resv, long end)
  221. {
  222. struct list_head *head = &resv->regions;
  223. struct file_region *rg, *trg;
  224. long chg = 0;
  225. spin_lock(&resv->lock);
  226. /* Locate the region we are either in or before. */
  227. list_for_each_entry(rg, head, link)
  228. if (end <= rg->to)
  229. break;
  230. if (&rg->link == head)
  231. goto out;
  232. /* If we are in the middle of a region then adjust it. */
  233. if (end > rg->from) {
  234. chg = rg->to - end;
  235. rg->to = end;
  236. rg = list_entry(rg->link.next, typeof(*rg), link);
  237. }
  238. /* Drop any remaining regions. */
  239. list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
  240. if (&rg->link == head)
  241. break;
  242. chg += rg->to - rg->from;
  243. list_del(&rg->link);
  244. kfree(rg);
  245. }
  246. out:
  247. spin_unlock(&resv->lock);
  248. return chg;
  249. }
  250. static long region_count(struct resv_map *resv, long f, long t)
  251. {
  252. struct list_head *head = &resv->regions;
  253. struct file_region *rg;
  254. long chg = 0;
  255. spin_lock(&resv->lock);
  256. /* Locate each segment we overlap with, and count that overlap. */
  257. list_for_each_entry(rg, head, link) {
  258. long seg_from;
  259. long seg_to;
  260. if (rg->to <= f)
  261. continue;
  262. if (rg->from >= t)
  263. break;
  264. seg_from = max(rg->from, f);
  265. seg_to = min(rg->to, t);
  266. chg += seg_to - seg_from;
  267. }
  268. spin_unlock(&resv->lock);
  269. return chg;
  270. }
  271. /*
  272. * Convert the address within this vma to the page offset within
  273. * the mapping, in pagecache page units; huge pages here.
  274. */
  275. static pgoff_t vma_hugecache_offset(struct hstate *h,
  276. struct vm_area_struct *vma, unsigned long address)
  277. {
  278. return ((address - vma->vm_start) >> huge_page_shift(h)) +
  279. (vma->vm_pgoff >> huge_page_order(h));
  280. }
  281. pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
  282. unsigned long address)
  283. {
  284. return vma_hugecache_offset(hstate_vma(vma), vma, address);
  285. }
  286. /*
  287. * Return the size of the pages allocated when backing a VMA. In the majority
  288. * of cases this will be the same size as used by the page table entries.
  289. */
  290. unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
  291. {
  292. struct hstate *hstate;
  293. if (!is_vm_hugetlb_page(vma))
  294. return PAGE_SIZE;
  295. hstate = hstate_vma(vma);
  296. return 1UL << huge_page_shift(hstate);
  297. }
  298. EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
  299. /*
  300. * Return the page size being used by the MMU to back a VMA. In the majority
  301. * of cases, the page size used by the kernel matches the MMU size. On
  302. * architectures where it differs, an architecture-specific version of this
  303. * function is required.
  304. */
  305. #ifndef vma_mmu_pagesize
  306. unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
  307. {
  308. return vma_kernel_pagesize(vma);
  309. }
  310. #endif
  311. /*
  312. * Flags for MAP_PRIVATE reservations. These are stored in the bottom
  313. * bits of the reservation map pointer, which are always clear due to
  314. * alignment.
  315. */
  316. #define HPAGE_RESV_OWNER (1UL << 0)
  317. #define HPAGE_RESV_UNMAPPED (1UL << 1)
  318. #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
  319. /*
  320. * These helpers are used to track how many pages are reserved for
  321. * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
  322. * is guaranteed to have their future faults succeed.
  323. *
  324. * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
  325. * the reserve counters are updated with the hugetlb_lock held. It is safe
  326. * to reset the VMA at fork() time as it is not in use yet and there is no
  327. * chance of the global counters getting corrupted as a result of the values.
  328. *
  329. * The private mapping reservation is represented in a subtly different
  330. * manner to a shared mapping. A shared mapping has a region map associated
  331. * with the underlying file, this region map represents the backing file
  332. * pages which have ever had a reservation assigned; this persists even
  333. * after the page is instantiated. A private mapping has a region map
  334. * associated with the original mmap which is attached to all VMAs which
  335. * reference it, this region map represents those offsets which have consumed
  336. * reservation, i.e. where pages have been instantiated.
  337. */
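/*
 * An illustrative sketch of the pointer/flag packing performed by the
 * helpers below (the resv_map pointer is aligned, so its low bits are free
 * to hold the HPAGE_RESV_* flags):
 *
 *	set_vma_resv_map(vma, map);                   stores the map pointer
 *	set_vma_resv_flags(vma, HPAGE_RESV_OWNER);    marks the mmap() caller
 *	map = vma_resv_map(vma);                      pointer, flags masked off
 */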
  338. static unsigned long get_vma_private_data(struct vm_area_struct *vma)
  339. {
  340. return (unsigned long)vma->vm_private_data;
  341. }
  342. static void set_vma_private_data(struct vm_area_struct *vma,
  343. unsigned long value)
  344. {
  345. vma->vm_private_data = (void *)value;
  346. }
  347. struct resv_map *resv_map_alloc(void)
  348. {
  349. struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
  350. if (!resv_map)
  351. return NULL;
  352. kref_init(&resv_map->refs);
  353. spin_lock_init(&resv_map->lock);
  354. INIT_LIST_HEAD(&resv_map->regions);
  355. return resv_map;
  356. }
  357. void resv_map_release(struct kref *ref)
  358. {
  359. struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
  360. /* Clear out any active regions before we release the map. */
  361. region_truncate(resv_map, 0);
  362. kfree(resv_map);
  363. }
  364. static inline struct resv_map *inode_resv_map(struct inode *inode)
  365. {
  366. return inode->i_mapping->private_data;
  367. }
  368. static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
  369. {
  370. VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
  371. if (vma->vm_flags & VM_MAYSHARE) {
  372. struct address_space *mapping = vma->vm_file->f_mapping;
  373. struct inode *inode = mapping->host;
  374. return inode_resv_map(inode);
  375. } else {
  376. return (struct resv_map *)(get_vma_private_data(vma) &
  377. ~HPAGE_RESV_MASK);
  378. }
  379. }
  380. static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
  381. {
  382. VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
  383. VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
  384. set_vma_private_data(vma, (get_vma_private_data(vma) &
  385. HPAGE_RESV_MASK) | (unsigned long)map);
  386. }
  387. static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
  388. {
  389. VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
  390. VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
  391. set_vma_private_data(vma, get_vma_private_data(vma) | flags);
  392. }
  393. static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
  394. {
  395. VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
  396. return (get_vma_private_data(vma) & flag) != 0;
  397. }
  398. /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
  399. void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
  400. {
  401. VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
  402. if (!(vma->vm_flags & VM_MAYSHARE))
  403. vma->vm_private_data = (void *)0;
  404. }
  405. /* Returns true if the VMA has associated reserve pages */
  406. static int vma_has_reserves(struct vm_area_struct *vma, long chg)
  407. {
  408. if (vma->vm_flags & VM_NORESERVE) {
  409. /*
  410. * This address is already reserved by another process (chg == 0),
  411. * so we should decrement the reserved count. Without decrementing,
  412. * the reserve count would remain after releasing the inode, because
  413. * this allocated page will go into the page cache and be regarded
  414. * as coming from the reserved pool in the release step. Currently we
  415. * don't have any better solution to deal with this situation
  416. * properly, so add a work-around here.
  417. */
  418. if (vma->vm_flags & VM_MAYSHARE && chg == 0)
  419. return 1;
  420. else
  421. return 0;
  422. }
  423. /* Shared mappings always use reserves */
  424. if (vma->vm_flags & VM_MAYSHARE)
  425. return 1;
  426. /*
  427. * Only the process that called mmap() has reserves for
  428. * private mappings.
  429. */
  430. if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
  431. return 1;
  432. return 0;
  433. }
  434. static void enqueue_huge_page(struct hstate *h, struct page *page)
  435. {
  436. int nid = page_to_nid(page);
  437. list_move(&page->lru, &h->hugepage_freelists[nid]);
  438. h->free_huge_pages++;
  439. h->free_huge_pages_node[nid]++;
  440. }
  441. static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
  442. {
  443. struct page *page;
  444. list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
  445. if (!is_migrate_isolate_page(page))
  446. break;
  447. /*
  448. * if a 'non-isolated free hugepage' is not found on the list,
  449. * the allocation fails.
  450. */
  451. if (&h->hugepage_freelists[nid] == &page->lru)
  452. return NULL;
  453. list_move(&page->lru, &h->hugepage_activelist);
  454. set_page_refcounted(page);
  455. h->free_huge_pages--;
  456. h->free_huge_pages_node[nid]--;
  457. return page;
  458. }
  459. /* Movability of hugepages depends on migration support. */
  460. static inline gfp_t htlb_alloc_mask(struct hstate *h)
  461. {
  462. if (hugepages_treat_as_movable || hugepage_migration_supported(h))
  463. return GFP_HIGHUSER_MOVABLE;
  464. else
  465. return GFP_HIGHUSER;
  466. }
  467. static struct page *dequeue_huge_page_vma(struct hstate *h,
  468. struct vm_area_struct *vma,
  469. unsigned long address, int avoid_reserve,
  470. long chg)
  471. {
  472. struct page *page = NULL;
  473. struct mempolicy *mpol;
  474. nodemask_t *nodemask;
  475. struct zonelist *zonelist;
  476. struct zone *zone;
  477. struct zoneref *z;
  478. unsigned int cpuset_mems_cookie;
  479. /*
  480. * A child process with MAP_PRIVATE mappings created by its parent
  481. * has no page reserves. This check ensures that reservations are
  482. * not "stolen". The child may still get SIGKILLed
  483. */
  484. if (!vma_has_reserves(vma, chg) &&
  485. h->free_huge_pages - h->resv_huge_pages == 0)
  486. goto err;
  487. /* If reserves cannot be used, ensure enough pages are in the pool */
  488. if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
  489. goto err;
  490. retry_cpuset:
  491. cpuset_mems_cookie = read_mems_allowed_begin();
  492. zonelist = huge_zonelist(vma, address,
  493. htlb_alloc_mask(h), &mpol, &nodemask);
  494. for_each_zone_zonelist_nodemask(zone, z, zonelist,
  495. MAX_NR_ZONES - 1, nodemask) {
  496. if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) {
  497. page = dequeue_huge_page_node(h, zone_to_nid(zone));
  498. if (page) {
  499. if (avoid_reserve)
  500. break;
  501. if (!vma_has_reserves(vma, chg))
  502. break;
  503. SetPagePrivate(page);
  504. h->resv_huge_pages--;
  505. break;
  506. }
  507. }
  508. }
  509. mpol_cond_put(mpol);
  510. if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
  511. goto retry_cpuset;
  512. return page;
  513. err:
  514. return NULL;
  515. }
  516. /*
  517. * common helper functions for hstate_next_node_to_{alloc|free}.
  518. * We may have allocated or freed a huge page based on a different
  519. * nodes_allowed previously, so h->next_node_to_{alloc|free} might
  520. * be outside of *nodes_allowed. Ensure that we use an allowed
  521. * node for alloc or free.
  522. */
  523. static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
  524. {
  525. nid = next_node(nid, *nodes_allowed);
  526. if (nid == MAX_NUMNODES)
  527. nid = first_node(*nodes_allowed);
  528. VM_BUG_ON(nid >= MAX_NUMNODES);
  529. return nid;
  530. }
  531. static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
  532. {
  533. if (!node_isset(nid, *nodes_allowed))
  534. nid = next_node_allowed(nid, nodes_allowed);
  535. return nid;
  536. }
  537. /*
  538. * returns the previously saved node ["this node"] from which to
  539. * allocate a persistent huge page for the pool and advance the
  540. * next node from which to allocate, handling wrap at end of node
  541. * mask.
  542. */
  543. static int hstate_next_node_to_alloc(struct hstate *h,
  544. nodemask_t *nodes_allowed)
  545. {
  546. int nid;
  547. VM_BUG_ON(!nodes_allowed);
  548. nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
  549. h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
  550. return nid;
  551. }
  552. /*
  553. * helper for free_pool_huge_page() - return the previously saved
  554. * node ["this node"] from which to free a huge page. Advance the
  555. * next node id whether or not we find a free huge page to free so
  556. * that the next attempt to free addresses the next node.
  557. */
  558. static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
  559. {
  560. int nid;
  561. VM_BUG_ON(!nodes_allowed);
  562. nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
  563. h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
  564. return nid;
  565. }
  566. #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
  567. for (nr_nodes = nodes_weight(*mask); \
  568. nr_nodes > 0 && \
  569. ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
  570. nr_nodes--)
  571. #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
  572. for (nr_nodes = nodes_weight(*mask); \
  573. nr_nodes > 0 && \
  574. ((node = hstate_next_node_to_free(hs, mask)) || 1); \
  575. nr_nodes--)
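/*
 * Typical use of these iterators (a sketch mirroring alloc_fresh_huge_page()
 * and free_pool_huge_page() below): try each allowed node in round-robin
 * order until one attempt succeeds.
 *
 *	for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
 *		page = alloc_fresh_huge_page_node(h, node);
 *		if (page)
 *			break;
 *	}
 */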
  576. #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
  577. static void destroy_compound_gigantic_page(struct page *page,
  578. unsigned long order)
  579. {
  580. int i;
  581. int nr_pages = 1 << order;
  582. struct page *p = page + 1;
  583. for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
  584. __ClearPageTail(p);
  585. set_page_refcounted(p);
  586. p->first_page = NULL;
  587. }
  588. set_compound_order(page, 0);
  589. __ClearPageHead(page);
  590. }
  591. static void free_gigantic_page(struct page *page, unsigned order)
  592. {
  593. free_contig_range(page_to_pfn(page), 1 << order);
  594. }
  595. static int __alloc_gigantic_page(unsigned long start_pfn,
  596. unsigned long nr_pages)
  597. {
  598. unsigned long end_pfn = start_pfn + nr_pages;
  599. return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
  600. }
  601. static bool pfn_range_valid_gigantic(unsigned long start_pfn,
  602. unsigned long nr_pages)
  603. {
  604. unsigned long i, end_pfn = start_pfn + nr_pages;
  605. struct page *page;
  606. for (i = start_pfn; i < end_pfn; i++) {
  607. if (!pfn_valid(i))
  608. return false;
  609. page = pfn_to_page(i);
  610. if (PageReserved(page))
  611. return false;
  612. if (page_count(page) > 0)
  613. return false;
  614. if (PageHuge(page))
  615. return false;
  616. }
  617. return true;
  618. }
  619. static bool zone_spans_last_pfn(const struct zone *zone,
  620. unsigned long start_pfn, unsigned long nr_pages)
  621. {
  622. unsigned long last_pfn = start_pfn + nr_pages - 1;
  623. return zone_spans_pfn(zone, last_pfn);
  624. }
  625. static struct page *alloc_gigantic_page(int nid, unsigned order)
  626. {
  627. unsigned long nr_pages = 1 << order;
  628. unsigned long ret, pfn, flags;
  629. struct zone *z;
  630. z = NODE_DATA(nid)->node_zones;
  631. for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
  632. spin_lock_irqsave(&z->lock, flags);
  633. pfn = ALIGN(z->zone_start_pfn, nr_pages);
  634. while (zone_spans_last_pfn(z, pfn, nr_pages)) {
  635. if (pfn_range_valid_gigantic(pfn, nr_pages)) {
  636. /*
  637. * We release the zone lock here because
  638. * alloc_contig_range() will also lock the zone
  639. * at some point. If there's an allocation
  640. * spinning on this lock, it may win the race
  641. * and cause alloc_contig_range() to fail...
  642. */
  643. spin_unlock_irqrestore(&z->lock, flags);
  644. ret = __alloc_gigantic_page(pfn, nr_pages);
  645. if (!ret)
  646. return pfn_to_page(pfn);
  647. spin_lock_irqsave(&z->lock, flags);
  648. }
  649. pfn += nr_pages;
  650. }
  651. spin_unlock_irqrestore(&z->lock, flags);
  652. }
  653. return NULL;
  654. }
  655. static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
  656. static void prep_compound_gigantic_page(struct page *page, unsigned long order);
  657. static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
  658. {
  659. struct page *page;
  660. page = alloc_gigantic_page(nid, huge_page_order(h));
  661. if (page) {
  662. prep_compound_gigantic_page(page, huge_page_order(h));
  663. prep_new_huge_page(h, page, nid);
  664. }
  665. return page;
  666. }
  667. static int alloc_fresh_gigantic_page(struct hstate *h,
  668. nodemask_t *nodes_allowed)
  669. {
  670. struct page *page = NULL;
  671. int nr_nodes, node;
  672. for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
  673. page = alloc_fresh_gigantic_page_node(h, node);
  674. if (page)
  675. return 1;
  676. }
  677. return 0;
  678. }
  679. static inline bool gigantic_page_supported(void) { return true; }
  680. #else
  681. static inline bool gigantic_page_supported(void) { return false; }
  682. static inline void free_gigantic_page(struct page *page, unsigned order) { }
  683. static inline void destroy_compound_gigantic_page(struct page *page,
  684. unsigned long order) { }
  685. static inline int alloc_fresh_gigantic_page(struct hstate *h,
  686. nodemask_t *nodes_allowed) { return 0; }
  687. #endif
  688. static void update_and_free_page(struct hstate *h, struct page *page)
  689. {
  690. int i;
  691. if (hstate_is_gigantic(h) && !gigantic_page_supported())
  692. return;
  693. h->nr_huge_pages--;
  694. h->nr_huge_pages_node[page_to_nid(page)]--;
  695. for (i = 0; i < pages_per_huge_page(h); i++) {
  696. page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
  697. 1 << PG_referenced | 1 << PG_dirty |
  698. 1 << PG_active | 1 << PG_private |
  699. 1 << PG_writeback);
  700. }
  701. VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
  702. set_compound_page_dtor(page, NULL);
  703. set_page_refcounted(page);
  704. if (hstate_is_gigantic(h)) {
  705. destroy_compound_gigantic_page(page, huge_page_order(h));
  706. free_gigantic_page(page, huge_page_order(h));
  707. } else {
  708. arch_release_hugepage(page);
  709. __free_pages(page, huge_page_order(h));
  710. }
  711. }
  712. struct hstate *size_to_hstate(unsigned long size)
  713. {
  714. struct hstate *h;
  715. for_each_hstate(h) {
  716. if (huge_page_size(h) == size)
  717. return h;
  718. }
  719. return NULL;
  720. }
  721. void free_huge_page(struct page *page)
  722. {
  723. /*
  724. * Can't pass hstate in here because it is called from the
  725. * compound page destructor.
  726. */
  727. struct hstate *h = page_hstate(page);
  728. int nid = page_to_nid(page);
  729. struct hugepage_subpool *spool =
  730. (struct hugepage_subpool *)page_private(page);
  731. bool restore_reserve;
  732. set_page_private(page, 0);
  733. page->mapping = NULL;
  734. BUG_ON(page_count(page));
  735. BUG_ON(page_mapcount(page));
  736. restore_reserve = PagePrivate(page);
  737. ClearPagePrivate(page);
  738. spin_lock(&hugetlb_lock);
  739. hugetlb_cgroup_uncharge_page(hstate_index(h),
  740. pages_per_huge_page(h), page);
  741. if (restore_reserve)
  742. h->resv_huge_pages++;
  743. if (h->surplus_huge_pages_node[nid]) {
  744. /* remove the page from active list */
  745. list_del(&page->lru);
  746. update_and_free_page(h, page);
  747. h->surplus_huge_pages--;
  748. h->surplus_huge_pages_node[nid]--;
  749. } else {
  750. arch_clear_hugepage_flags(page);
  751. enqueue_huge_page(h, page);
  752. }
  753. spin_unlock(&hugetlb_lock);
  754. hugepage_subpool_put_pages(spool, 1);
  755. }
  756. static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
  757. {
  758. INIT_LIST_HEAD(&page->lru);
  759. set_compound_page_dtor(page, free_huge_page);
  760. spin_lock(&hugetlb_lock);
  761. set_hugetlb_cgroup(page, NULL);
  762. h->nr_huge_pages++;
  763. h->nr_huge_pages_node[nid]++;
  764. spin_unlock(&hugetlb_lock);
  765. put_page(page); /* free it into the hugepage allocator */
  766. }
  767. static void prep_compound_gigantic_page(struct page *page, unsigned long order)
  768. {
  769. int i;
  770. int nr_pages = 1 << order;
  771. struct page *p = page + 1;
  772. /* we rely on prep_new_huge_page to set the destructor */
  773. set_compound_order(page, order);
  774. __SetPageHead(page);
  775. __ClearPageReserved(page);
  776. for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
  777. __SetPageTail(p);
  778. /*
  779. * For gigantic hugepages allocated through bootmem at
  780. * boot, it's safer to be consistent with the not-gigantic
  781. * hugepages and clear the PG_reserved bit from all tail pages
  782. * too. Otherwise drivers using get_user_pages() to access tail
  783. * pages may get the reference counting wrong if they see
  784. * PG_reserved set on a tail page (despite the head page not
  785. * having PG_reserved set). Enforcing this consistency between
  786. * head and tail pages allows drivers to optimize away a check
  787. * on the head page when they need to know if put_page() is needed
  788. * after get_user_pages().
  789. */
  790. __ClearPageReserved(p);
  791. set_page_count(p, 0);
  792. p->first_page = page;
  793. }
  794. }
  795. /*
  796. * PageHuge() only returns true for hugetlbfs pages, but not for normal or
  797. * transparent huge pages. See the PageTransHuge() documentation for more
  798. * details.
  799. */
  800. int PageHuge(struct page *page)
  801. {
  802. if (!PageCompound(page))
  803. return 0;
  804. page = compound_head(page);
  805. return get_compound_page_dtor(page) == free_huge_page;
  806. }
  807. EXPORT_SYMBOL_GPL(PageHuge);
  808. /*
  809. * PageHeadHuge() only returns true for hugetlbfs head page, but not for
  810. * normal or transparent huge pages.
  811. */
  812. int PageHeadHuge(struct page *page_head)
  813. {
  814. if (!PageHead(page_head))
  815. return 0;
  816. return get_compound_page_dtor(page_head) == free_huge_page;
  817. }
  818. pgoff_t __basepage_index(struct page *page)
  819. {
  820. struct page *page_head = compound_head(page);
  821. pgoff_t index = page_index(page_head);
  822. unsigned long compound_idx;
  823. if (!PageHuge(page_head))
  824. return page_index(page);
  825. if (compound_order(page_head) >= MAX_ORDER)
  826. compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
  827. else
  828. compound_idx = page - page_head;
  829. return (index << compound_order(page_head)) + compound_idx;
  830. }
  831. static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
  832. {
  833. struct page *page;
  834. page = alloc_pages_exact_node(nid,
  835. htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
  836. __GFP_REPEAT|__GFP_NOWARN,
  837. huge_page_order(h));
  838. if (page) {
  839. if (arch_prepare_hugepage(page)) {
  840. __free_pages(page, huge_page_order(h));
  841. return NULL;
  842. }
  843. prep_new_huge_page(h, page, nid);
  844. }
  845. return page;
  846. }
  847. static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
  848. {
  849. struct page *page;
  850. int nr_nodes, node;
  851. int ret = 0;
  852. for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
  853. page = alloc_fresh_huge_page_node(h, node);
  854. if (page) {
  855. ret = 1;
  856. break;
  857. }
  858. }
  859. if (ret)
  860. count_vm_event(HTLB_BUDDY_PGALLOC);
  861. else
  862. count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
  863. return ret;
  864. }
  865. /*
  866. * Free huge page from pool from next node to free.
  867. * Attempt to keep persistent huge pages more or less
  868. * balanced over allowed nodes.
  869. * Called with hugetlb_lock locked.
  870. */
  871. static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
  872. bool acct_surplus)
  873. {
  874. int nr_nodes, node;
  875. int ret = 0;
  876. for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
  877. /*
  878. * If we're returning unused surplus pages, only examine
  879. * nodes with surplus pages.
  880. */
  881. if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
  882. !list_empty(&h->hugepage_freelists[node])) {
  883. struct page *page =
  884. list_entry(h->hugepage_freelists[node].next,
  885. struct page, lru);
  886. list_del(&page->lru);
  887. h->free_huge_pages--;
  888. h->free_huge_pages_node[node]--;
  889. if (acct_surplus) {
  890. h->surplus_huge_pages--;
  891. h->surplus_huge_pages_node[node]--;
  892. }
  893. update_and_free_page(h, page);
  894. ret = 1;
  895. break;
  896. }
  897. }
  898. return ret;
  899. }
  900. /*
  901. * Dissolve a given free hugepage into free buddy pages. This function does
  902. * nothing for in-use (including surplus) hugepages.
  903. */
  904. static void dissolve_free_huge_page(struct page *page)
  905. {
  906. spin_lock(&hugetlb_lock);
  907. if (PageHuge(page) && !page_count(page)) {
  908. struct hstate *h = page_hstate(page);
  909. int nid = page_to_nid(page);
  910. list_del(&page->lru);
  911. h->free_huge_pages--;
  912. h->free_huge_pages_node[nid]--;
  913. update_and_free_page(h, page);
  914. }
  915. spin_unlock(&hugetlb_lock);
  916. }
  917. /*
  918. * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
  919. * make specified memory blocks removable from the system.
  920. * Note that start_pfn should be aligned with the (minimum) hugepage size.
  921. */
  922. void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
  923. {
  924. unsigned int order = 8 * sizeof(void *);
  925. unsigned long pfn;
  926. struct hstate *h;
  927. if (!hugepages_supported())
  928. return;
  929. /* Set scan step to minimum hugepage size */
  930. for_each_hstate(h)
  931. if (order > huge_page_order(h))
  932. order = huge_page_order(h);
  933. VM_BUG_ON(!IS_ALIGNED(start_pfn, 1 << order));
  934. for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order)
  935. dissolve_free_huge_page(pfn_to_page(pfn));
  936. }
  937. static struct page *alloc_buddy_huge_page(struct hstate *h, int nid)
  938. {
  939. struct page *page;
  940. unsigned int r_nid;
  941. if (hstate_is_gigantic(h))
  942. return NULL;
  943. /*
  944. * Assume we will successfully allocate the surplus page to
  945. * prevent racing processes from causing the surplus to exceed
  946. * overcommit
  947. *
  948. * This however introduces a different race, where a process B
  949. * tries to grow the static hugepage pool while alloc_pages() is
  950. * called by process A. B will only examine the per-node
  951. * counters in determining if surplus huge pages can be
  952. * converted to normal huge pages in adjust_pool_surplus(). A
  953. * won't be able to increment the per-node counter, until the
  954. * lock is dropped by B, but B doesn't drop hugetlb_lock until
  955. * no more huge pages can be converted from surplus to normal
  956. * state (and doesn't try to convert again). Thus, we have a
  957. * case where a surplus huge page exists, the pool is grown, and
  958. * the surplus huge page still exists after, even though it
  959. * should just have been converted to a normal huge page. This
  960. * does not leak memory, though, as the hugepage will be freed
  961. * once it is out of use. It also does not allow the counters to
  962. * go out of whack in adjust_pool_surplus() as we don't modify
  963. * the node values until we've gotten the hugepage and only the
  964. * per-node value is checked there.
  965. */
  966. spin_lock(&hugetlb_lock);
  967. if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
  968. spin_unlock(&hugetlb_lock);
  969. return NULL;
  970. } else {
  971. h->nr_huge_pages++;
  972. h->surplus_huge_pages++;
  973. }
  974. spin_unlock(&hugetlb_lock);
  975. if (nid == NUMA_NO_NODE)
  976. page = alloc_pages(htlb_alloc_mask(h)|__GFP_COMP|
  977. __GFP_REPEAT|__GFP_NOWARN,
  978. huge_page_order(h));
  979. else
  980. page = alloc_pages_exact_node(nid,
  981. htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
  982. __GFP_REPEAT|__GFP_NOWARN, huge_page_order(h));
  983. if (page && arch_prepare_hugepage(page)) {
  984. __free_pages(page, huge_page_order(h));
  985. page = NULL;
  986. }
  987. spin_lock(&hugetlb_lock);
  988. if (page) {
  989. INIT_LIST_HEAD(&page->lru);
  990. r_nid = page_to_nid(page);
  991. set_compound_page_dtor(page, free_huge_page);
  992. set_hugetlb_cgroup(page, NULL);
  993. /*
  994. * We incremented the global counters already
  995. */
  996. h->nr_huge_pages_node[r_nid]++;
  997. h->surplus_huge_pages_node[r_nid]++;
  998. __count_vm_event(HTLB_BUDDY_PGALLOC);
  999. } else {
  1000. h->nr_huge_pages--;
  1001. h->surplus_huge_pages--;
  1002. __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
  1003. }
  1004. spin_unlock(&hugetlb_lock);
  1005. return page;
  1006. }
  1007. /*
  1008. * This allocation function is useful in the context where vma is irrelevant.
  1009. * E.g. soft-offlining uses this function because it only cares about
  1010. * the physical address of the error page.
  1011. */
  1012. struct page *alloc_huge_page_node(struct hstate *h, int nid)
  1013. {
  1014. struct page *page = NULL;
  1015. spin_lock(&hugetlb_lock);
  1016. if (h->free_huge_pages - h->resv_huge_pages > 0)
  1017. page = dequeue_huge_page_node(h, nid);
  1018. spin_unlock(&hugetlb_lock);
  1019. if (!page)
  1020. page = alloc_buddy_huge_page(h, nid);
  1021. return page;
  1022. }
  1023. /*
  1024. * Increase the hugetlb pool such that it can accommodate a reservation
  1025. * of size 'delta'.
  1026. */
  1027. static int gather_surplus_pages(struct hstate *h, int delta)
  1028. {
  1029. struct list_head surplus_list;
  1030. struct page *page, *tmp;
  1031. int ret, i;
  1032. int needed, allocated;
  1033. bool alloc_ok = true;
  1034. needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
  1035. if (needed <= 0) {
  1036. h->resv_huge_pages += delta;
  1037. return 0;
  1038. }
  1039. allocated = 0;
  1040. INIT_LIST_HEAD(&surplus_list);
  1041. ret = -ENOMEM;
  1042. retry:
  1043. spin_unlock(&hugetlb_lock);
  1044. for (i = 0; i < needed; i++) {
  1045. page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
  1046. if (!page) {
  1047. alloc_ok = false;
  1048. break;
  1049. }
  1050. list_add(&page->lru, &surplus_list);
  1051. }
  1052. allocated += i;
  1053. /*
  1054. * After retaking hugetlb_lock, we need to recalculate 'needed'
  1055. * because either resv_huge_pages or free_huge_pages may have changed.
  1056. */
  1057. spin_lock(&hugetlb_lock);
  1058. needed = (h->resv_huge_pages + delta) -
  1059. (h->free_huge_pages + allocated);
  1060. if (needed > 0) {
  1061. if (alloc_ok)
  1062. goto retry;
  1063. /*
  1064. * We were not able to allocate enough pages to
  1065. * satisfy the entire reservation so we free what
  1066. * we've allocated so far.
  1067. */
  1068. goto free;
  1069. }
  1070. /*
  1071. * The surplus_list now contains _at_least_ the number of extra pages
  1072. * needed to accommodate the reservation. Add the appropriate number
  1073. * of pages to the hugetlb pool and free the extras back to the buddy
  1074. * allocator. Commit the entire reservation here to prevent another
  1075. * process from stealing the pages as they are added to the pool but
  1076. * before they are reserved.
  1077. */
  1078. needed += allocated;
  1079. h->resv_huge_pages += delta;
  1080. ret = 0;
  1081. /* Free the needed pages to the hugetlb pool */
  1082. list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
  1083. if ((--needed) < 0)
  1084. break;
  1085. /*
  1086. * This page is now managed by the hugetlb allocator and has
  1087. * no users -- drop the buddy allocator's reference.
  1088. */
  1089. put_page_testzero(page);
  1090. VM_BUG_ON_PAGE(page_count(page), page);
  1091. enqueue_huge_page(h, page);
  1092. }
  1093. free:
  1094. spin_unlock(&hugetlb_lock);
  1095. /* Free unnecessary surplus pages to the buddy allocator */
  1096. list_for_each_entry_safe(page, tmp, &surplus_list, lru)
  1097. put_page(page);
  1098. spin_lock(&hugetlb_lock);
  1099. return ret;
  1100. }
  1101. /*
  1102. * When releasing a hugetlb pool reservation, any surplus pages that were
  1103. * allocated to satisfy the reservation must be explicitly freed if they were
  1104. * never used.
  1105. * Called with hugetlb_lock held.
  1106. */
  1107. static void return_unused_surplus_pages(struct hstate *h,
  1108. unsigned long unused_resv_pages)
  1109. {
  1110. unsigned long nr_pages;
  1111. /* Uncommit the reservation */
  1112. h->resv_huge_pages -= unused_resv_pages;
  1113. /* Cannot return gigantic pages currently */
  1114. if (hstate_is_gigantic(h))
  1115. return;
  1116. nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
  1117. /*
  1118. * We want to release as many surplus pages as possible, spread
  1119. * evenly across all nodes with memory. Iterate across these nodes
  1120. * until we can no longer free unreserved surplus pages. This occurs
  1121. * when the nodes with surplus pages have no free pages.
  1122. * free_pool_huge_page() will balance the freed pages across the
  1123. * on-line nodes with memory and will handle the hstate accounting.
  1124. */
  1125. while (nr_pages--) {
  1126. if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
  1127. break;
  1128. cond_resched_lock(&hugetlb_lock);
  1129. }
  1130. }
  1131. /*
  1132. * Determine if the huge page at addr within the vma has an associated
  1133. * reservation. Where it does not we will need to logically increase
  1134. * reservation and actually increase subpool usage before an allocation
  1135. * can occur. Where any new reservation would be required the
  1136. * reservation change is prepared, but not committed. Once the page
  1137. * has been allocated from the subpool and instantiated the change should
  1138. * be committed via vma_commit_reservation. No action is required on
  1139. * failure.
  1140. */
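/*
 * A sketch of the prepare/commit protocol described above, as used by
 * alloc_huge_page() further below:
 *
 *	chg = vma_needs_reservation(h, vma, addr);	prepare (may be 0)
 *	if (chg < 0)
 *		return ERR_PTR(-ENOMEM);
 *	... allocate the page from the pool/subpool ...
 *	vma_commit_reservation(h, vma, addr);		commit on success
 */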
  1141. static long vma_needs_reservation(struct hstate *h,
  1142. struct vm_area_struct *vma, unsigned long addr)
  1143. {
  1144. struct resv_map *resv;
  1145. pgoff_t idx;
  1146. long chg;
  1147. resv = vma_resv_map(vma);
  1148. if (!resv)
  1149. return 1;
  1150. idx = vma_hugecache_offset(h, vma, addr);
  1151. chg = region_chg(resv, idx, idx + 1);
  1152. if (vma->vm_flags & VM_MAYSHARE)
  1153. return chg;
  1154. else
  1155. return chg < 0 ? chg : 0;
  1156. }
  1157. static void vma_commit_reservation(struct hstate *h,
  1158. struct vm_area_struct *vma, unsigned long addr)
  1159. {
  1160. struct resv_map *resv;
  1161. pgoff_t idx;
  1162. resv = vma_resv_map(vma);
  1163. if (!resv)
  1164. return;
  1165. idx = vma_hugecache_offset(h, vma, addr);
  1166. region_add(resv, idx, idx + 1);
  1167. }
  1168. static struct page *alloc_huge_page(struct vm_area_struct *vma,
  1169. unsigned long addr, int avoid_reserve)
  1170. {
  1171. struct hugepage_subpool *spool = subpool_vma(vma);
  1172. struct hstate *h = hstate_vma(vma);
  1173. struct page *page;
  1174. long chg;
  1175. int ret, idx;
  1176. struct hugetlb_cgroup *h_cg;
  1177. idx = hstate_index(h);
  1178. /*
  1179. * Processes that did not create the mapping will have no
  1180. * reserves and will not have accounted against subpool
  1181. * limit. Check that the subpool limit can be made before
1182. satisfying the allocation. MAP_NORESERVE mappings may also
1183. need pages and subpool limit allocated if no reserve
  1184. * mapping overlaps.
  1185. */
  1186. chg = vma_needs_reservation(h, vma, addr);
  1187. if (chg < 0)
  1188. return ERR_PTR(-ENOMEM);
  1189. if (chg || avoid_reserve)
  1190. if (hugepage_subpool_get_pages(spool, 1))
  1191. return ERR_PTR(-ENOSPC);
  1192. ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
  1193. if (ret)
  1194. goto out_subpool_put;
  1195. spin_lock(&hugetlb_lock);
  1196. page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, chg);
  1197. if (!page) {
  1198. spin_unlock(&hugetlb_lock);
  1199. page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
  1200. if (!page)
  1201. goto out_uncharge_cgroup;
  1202. spin_lock(&hugetlb_lock);
  1203. list_move(&page->lru, &h->hugepage_activelist);
  1204. /* Fall through */
  1205. }
  1206. hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
  1207. spin_unlock(&hugetlb_lock);
  1208. set_page_private(page, (unsigned long)spool);
  1209. vma_commit_reservation(h, vma, addr);
  1210. return page;
  1211. out_uncharge_cgroup:
  1212. hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
  1213. out_subpool_put:
  1214. if (chg || avoid_reserve)
  1215. hugepage_subpool_put_pages(spool, 1);
  1216. return ERR_PTR(-ENOSPC);
  1217. }
  1218. /*
  1219. * alloc_huge_page()'s wrapper which simply returns the page if allocation
  1220. * succeeds, otherwise NULL. This function is called from new_vma_page(),
  1221. * where no ERR_VALUE is expected to be returned.
  1222. */
  1223. struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
  1224. unsigned long addr, int avoid_reserve)
  1225. {
  1226. struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
  1227. if (IS_ERR(page))
  1228. page = NULL;
  1229. return page;
  1230. }
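/*
Allocate one boot-time huge page from memblock and record it on
huge_boot_pages; gather_bootmem_prealloc() later turns these into proper huge
pages once mem_map is up. Returns 1 on success, 0 on failure.
*/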
  1231. int __weak alloc_bootmem_huge_page(struct hstate *h)
  1232. {
  1233. struct huge_bootmem_page *m;
  1234. int nr_nodes, node;
  1235. for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
  1236. void *addr;
  1237. addr = memblock_virt_alloc_try_nid_nopanic(
  1238. huge_page_size(h), huge_page_size(h),
  1239. 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
  1240. if (addr) {
  1241. /*
  1242. * Use the beginning of the huge page to store the
  1243. * huge_bootmem_page struct (until gather_bootmem
  1244. * puts them into the mem_map).
  1245. */
  1246. m = addr;
  1247. goto found;
  1248. }
  1249. }
  1250. return 0;
  1251. found:
  1252. BUG_ON((unsigned long)virt_to_phys(m) & (huge_page_size(h) - 1));
  1253. /* Put them into a private list first because mem_map is not up yet */
  1254. list_add(&m->list, &huge_boot_pages);
  1255. m->hstate = h;
  1256. return 1;
  1257. }
  1258. static void __init prep_compound_huge_page(struct page *page, int order)
  1259. {
  1260. if (unlikely(order > (MAX_ORDER - 1)))
  1261. prep_compound_gigantic_page(page, order);
  1262. else
  1263. prep_compound_page(page, order);
  1264. }
  1265. /* Put bootmem huge pages into the standard lists after mem_map is up */
  1266. static void __init gather_bootmem_prealloc(void)
  1267. {
  1268. struct huge_bootmem_page *m;
  1269. list_for_each_entry(m, &huge_boot_pages, list) {
  1270. struct hstate *h = m->hstate;
  1271. struct page *page;
  1272. #ifdef CONFIG_HIGHMEM
  1273. page = pfn_to_page(m->phys >> PAGE_SHIFT);
  1274. memblock_free_late(__pa(m),
  1275. sizeof(struct huge_bootmem_page));
  1276. #else
  1277. page = virt_to_page(m);
  1278. #endif
  1279. WARN_ON(page_count(page) != 1);
  1280. prep_compound_huge_page(page, h->order);
  1281. WARN_ON(PageReserved(page));
  1282. prep_new_huge_page(h, page, page_to_nid(page));
  1283. /*
  1284. * If we had gigantic hugepages allocated at boot time, we need
  1285. * to restore the 'stolen' pages to totalram_pages in order to
1286. fix confusing memory reports from free(1) and other
  1287. * side-effects, like CommitLimit going negative.
  1288. */
  1289. if (hstate_is_gigantic(h))
  1290. adjust_managed_page_count(page, 1 << h->order);
  1291. }
  1292. }
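/*
Preallocate up to max_huge_pages pages for the hstate at boot: gigantic pages
come from bootmem, the rest from fresh page allocation. max_huge_pages is
trimmed to the number actually obtained.
*/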
  1293. static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
  1294. {
  1295. unsigned long i;
  1296. for (i = 0; i < h->max_huge_pages; ++i) {
  1297. if (hstate_is_gigantic(h)) {
  1298. if (!alloc_bootmem_huge_page(h))
  1299. break;
  1300. } else if (!alloc_fresh_huge_page(h,
  1301. &node_states[N_MEMORY]))
  1302. break;
  1303. }
  1304. h->max_huge_pages = i;
  1305. }
  1306. static void __init hugetlb_init_hstates(void)
  1307. {
  1308. struct hstate *h;
  1309. for_each_hstate(h) {
  1310. /* oversize hugepages were init'ed in early boot */
  1311. if (!hstate_is_gigantic(h))
  1312. hugetlb_hstate_alloc_pages(h);
  1313. }
  1314. }
  1315. static char * __init memfmt(char *buf, unsigned long n)
  1316. {
  1317. if (n >= (1UL << 30))
  1318. sprintf(buf, "%lu GB", n >> 30);
  1319. else if (n >= (1UL << 20))
  1320. sprintf(buf, "%lu MB", n >> 20);
  1321. else
  1322. sprintf(buf, "%lu KB", n >> 10);
  1323. return buf;
  1324. }
  1325. static void __init report_hugepages(void)
  1326. {
  1327. struct hstate *h;
  1328. for_each_hstate(h) {
  1329. char buf[32];
  1330. pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
  1331. memfmt(buf, huge_page_size(h)),
  1332. h->free_huge_pages);
  1333. }
  1334. }
  1335. #ifdef CONFIG_HIGHMEM
  1336. static void try_to_free_low(struct hstate *h, unsigned long count,
  1337. nodemask_t *nodes_allowed)
  1338. {
  1339. int i;
  1340. if (hstate_is_gigantic(h))
  1341. return;
  1342. for_each_node_mask(i, *nodes_allowed) {
  1343. struct page *page, *next;
  1344. struct list_head *freel = &h->hugepage_freelists[i];
  1345. list_for_each_entry_safe(page, next, freel, lru) {
  1346. if (count >= h->nr_huge_pages)
  1347. return;
  1348. if (PageHighMem(page))
  1349. continue;
  1350. list_del(&page->lru);
  1351. update_and_free_page(h, page);
  1352. h->free_huge_pages--;
  1353. h->free_huge_pages_node[page_to_nid(page)]--;
  1354. }
  1355. }
  1356. }
  1357. #else
  1358. static inline void try_to_free_low(struct hstate *h, unsigned long count,
  1359. nodemask_t *nodes_allowed)
  1360. {
  1361. }
  1362. #endif
  1363. /*
  1364. * Increment or decrement surplus_huge_pages. Keep node-specific counters
  1365. * balanced by operating on them in a round-robin fashion.
  1366. * Returns 1 if an adjustment was made.
  1367. */
  1368. static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
  1369. int delta)
  1370. {
  1371. int nr_nodes, node;
  1372. VM_BUG_ON(delta != -1 && delta != 1);
  1373. if (delta < 0) {
  1374. for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
  1375. if (h->surplus_huge_pages_node[node])
  1376. goto found;
  1377. }
  1378. } else {
  1379. for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
  1380. if (h->surplus_huge_pages_node[node] <
  1381. h->nr_huge_pages_node[node])
  1382. goto found;
  1383. }
  1384. }
  1385. return 0;
  1386. found:
  1387. h->surplus_huge_pages += delta;
  1388. h->surplus_huge_pages_node[node] += delta;
  1389. return 1;
  1390. }
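/* Huge pages held persistently in the pool, i.e. excluding surplus pages. */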
  1391. #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
  1392. static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
  1393. nodemask_t *nodes_allowed)
  1394. {
  1395. unsigned long min_count, ret;
  1396. if (hstate_is_gigantic(h) && !gigantic_page_supported())
  1397. return h->max_huge_pages;
  1398. /*
  1399. * Increase the pool size
  1400. * First take pages out of surplus state. Then make up the
  1401. * remaining difference by allocating fresh huge pages.
  1402. *
  1403. * We might race with alloc_buddy_huge_page() here and be unable
  1404. * to convert a surplus huge page to a normal huge page. That is
  1405. * not critical, though, it just means the overall size of the
  1406. * pool might be one hugepage larger than it needs to be, but
  1407. * within all the constraints specified by the sysctls.
  1408. */
  1409. spin_lock(&hugetlb_lock);
  1410. while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
  1411. if (!adjust_pool_surplus(h, nodes_allowed, -1))
  1412. break;
  1413. }
  1414. while (count > persistent_huge_pages(h)) {
  1415. /*
  1416. * If this allocation races such that we no longer need the
  1417. * page, free_huge_page will handle it by freeing the page
  1418. * and reducing the surplus.
  1419. */
  1420. spin_unlock(&hugetlb_lock);
  1421. if (hstate_is_gigantic(h))
  1422. ret = alloc_fresh_gigantic_page(h, nodes_allowed);
  1423. else
  1424. ret = alloc_fresh_huge_page(h, nodes_allowed);
  1425. spin_lock(&hugetlb_lock);
  1426. if (!ret)
  1427. goto out;
  1428. /* Bail for signals. Probably ctrl-c from user */
  1429. if (signal_pending(current))
  1430. goto out;
  1431. }
  1432. /*
  1433. * Decrease the pool size
  1434. * First return free pages to the buddy allocator (being careful
  1435. * to keep enough around to satisfy reservations). Then place
  1436. * pages into surplus state as needed so the pool will shrink
  1437. * to the desired size as pages become free.
  1438. *
  1439. * By placing pages into the surplus state independent of the
  1440. * overcommit value, we are allowing the surplus pool size to
  1441. * exceed overcommit. There are few sane options here. Since
  1442. * alloc_buddy_huge_page() is checking the global counter,
  1443. * though, we'll note that we're not allowed to exceed surplus
  1444. * and won't grow the pool anywhere else. Not until one of the
1445. sysctls is changed, or the surplus pages go out of use.
  1446. */
  1447. min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
  1448. min_count = max(count, min_count);
  1449. try_to_free_low(h, min_count, nodes_allowed);
  1450. while (min_count < persistent_huge_pages(h)) {
  1451. if (!free_pool_huge_page(h, nodes_allowed, 0))
  1452. break;
  1453. cond_resched_lock(&hugetlb_lock);
  1454. }
  1455. while (count < persistent_huge_pages(h)) {
  1456. if (!adjust_pool_surplus(h, nodes_allowed, 1))
  1457. break;
  1458. }
  1459. out:
  1460. ret = persistent_huge_pages(h);
  1461. spin_unlock(&hugetlb_lock);
  1462. return ret;
  1463. }
  1464. #define HSTATE_ATTR_RO(_name) \
  1465. static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
  1466. #define HSTATE_ATTR(_name) \
  1467. static struct kobj_attribute _name##_attr = \
  1468. __ATTR(_name, 0644, _name##_show, _name##_store)
  1469. static struct kobject *hugepages_kobj;
  1470. static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
  1471. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
  1472. static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
  1473. {
  1474. int i;
  1475. for (i = 0; i < HUGE_MAX_HSTATE; i++)
  1476. if (hstate_kobjs[i] == kobj) {
  1477. if (nidp)
  1478. *nidp = NUMA_NO_NODE;
  1479. return &hstates[i];
  1480. }
  1481. return kobj_to_node_hstate(kobj, nidp);
  1482. }
  1483. static ssize_t nr_hugepages_show_common(struct kobject *kobj,
  1484. struct kobj_attribute *attr, char *buf)
  1485. {
  1486. struct hstate *h;
  1487. unsigned long nr_huge_pages;
  1488. int nid;
  1489. h = kobj_to_hstate(kobj, &nid);
  1490. if (nid == NUMA_NO_NODE)
  1491. nr_huge_pages = h->nr_huge_pages;
  1492. else
  1493. nr_huge_pages = h->nr_huge_pages_node[nid];
  1494. return sprintf(buf, "%lu\n", nr_huge_pages);
  1495. }
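/*
Common helper for nr_hugepages writes from sysfs and sysctl: build the
nodemask to allocate/free on (from the mempolicy, a single node, or all nodes
with memory) and resize the pool via set_max_huge_pages().
*/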
  1496. static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
  1497. struct hstate *h, int nid,
  1498. unsigned long count, size_t len)
  1499. {
  1500. int err;
  1501. NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
  1502. if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
  1503. err = -EINVAL;
  1504. goto out;
  1505. }
  1506. if (nid == NUMA_NO_NODE) {
  1507. /*
  1508. * global hstate attribute
  1509. */
  1510. if (!(obey_mempolicy &&
  1511. init_nodemask_of_mempolicy(nodes_allowed))) {
  1512. NODEMASK_FREE(nodes_allowed);
  1513. nodes_allowed = &node_states[N_MEMORY];
  1514. }
  1515. } else if (nodes_allowed) {
  1516. /*
  1517. * per node hstate attribute: adjust count to global,
  1518. * but restrict alloc/free to the specified node.
  1519. */
  1520. count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
  1521. init_nodemask_of_node(nodes_allowed, nid);
  1522. } else
  1523. nodes_allowed = &node_states[N_MEMORY];
  1524. h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
  1525. if (nodes_allowed != &node_states[N_MEMORY])
  1526. NODEMASK_FREE(nodes_allowed);
  1527. return len;
  1528. out:
  1529. NODEMASK_FREE(nodes_allowed);
  1530. return err;
  1531. }
  1532. static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
  1533. struct kobject *kobj, const char *buf,
  1534. size_t len)
  1535. {
  1536. struct hstate *h;
  1537. unsigned long count;
  1538. int nid;
  1539. int err;
  1540. err = kstrtoul(buf, 10, &count);
  1541. if (err)
  1542. return err;
  1543. h = kobj_to_hstate(kobj, &nid);
  1544. return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
  1545. }
  1546. static ssize_t nr_hugepages_show(struct kobject *kobj,
  1547. struct kobj_attribute *attr, char *buf)
  1548. {
  1549. return nr_hugepages_show_common(kobj, attr, buf);
  1550. }
  1551. static ssize_t nr_hugepages_store(struct kobject *kobj,
  1552. struct kobj_attribute *attr, const char *buf, size_t len)
  1553. {
  1554. return nr_hugepages_store_common(false, kobj, buf, len);
  1555. }
  1556. HSTATE_ATTR(nr_hugepages);
  1557. #ifdef CONFIG_NUMA
  1558. /*
  1559. * hstate attribute for optionally mempolicy-based constraint on persistent
  1560. * huge page alloc/free.
  1561. */
  1562. static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
  1563. struct kobj_attribute *attr, char *buf)
  1564. {
  1565. return nr_hugepages_show_common(kobj, attr, buf);
  1566. }
  1567. static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
  1568. struct kobj_attribute *attr, const char *buf, size_t len)
  1569. {
  1570. return nr_hugepages_store_common(true, kobj, buf, len);
  1571. }
  1572. HSTATE_ATTR(nr_hugepages_mempolicy);
  1573. #endif
  1574. static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
  1575. struct kobj_attribute *attr, char *buf)
  1576. {
  1577. struct hstate *h = kobj_to_hstate(kobj, NULL);
  1578. return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
  1579. }
  1580. static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
  1581. struct kobj_attribute *attr, const char *buf, size_t count)
  1582. {
  1583. int err;
  1584. unsigned long input;
  1585. struct hstate *h = kobj_to_hstate(kobj, NULL);
  1586. if (hstate_is_gigantic(h))
  1587. return -EINVAL;
  1588. err = kstrtoul(buf, 10, &input);
  1589. if (err)
  1590. return err;
  1591. spin_lock(&hugetlb_lock);
  1592. h->nr_overcommit_huge_pages = input;
  1593. spin_unlock(&hugetlb_lock);
  1594. return count;
  1595. }
  1596. HSTATE_ATTR(nr_overcommit_hugepages);
  1597. static ssize_t free_hugepages_show(struct kobject *kobj,
  1598. struct kobj_attribute *attr, char *buf)
  1599. {
  1600. struct hstate *h;
  1601. unsigned long free_huge_pages;
  1602. int nid;
  1603. h = kobj_to_hstate(kobj, &nid);
  1604. if (nid == NUMA_NO_NODE)
  1605. free_huge_pages = h->free_huge_pages;
  1606. else
  1607. free_huge_pages = h->free_huge_pages_node[nid];
  1608. return sprintf(buf, "%lu\n", free_huge_pages);
  1609. }
  1610. HSTATE_ATTR_RO(free_hugepages);
  1611. static ssize_t resv_hugepages_show(struct kobject *kobj,
  1612. struct kobj_attribute *attr, char *buf)
  1613. {
  1614. struct hstate *h = kobj_to_hstate(kobj, NULL);
  1615. return sprintf(buf, "%lu\n", h->resv_huge_pages);
  1616. }
  1617. HSTATE_ATTR_RO(resv_hugepages);
  1618. static ssize_t surplus_hugepages_show(struct kobject *kobj,
  1619. struct kobj_attribute *attr, char *buf)
  1620. {
  1621. struct hstate *h;
  1622. unsigned long surplus_huge_pages;
  1623. int nid;
  1624. h = kobj_to_hstate(kobj, &nid);
  1625. if (nid == NUMA_NO_NODE)
  1626. surplus_huge_pages = h->surplus_huge_pages;
  1627. else
  1628. surplus_huge_pages = h->surplus_huge_pages_node[nid];
  1629. return sprintf(buf, "%lu\n", surplus_huge_pages);
  1630. }
  1631. HSTATE_ATTR_RO(surplus_hugepages);
  1632. static struct attribute *hstate_attrs[] = {
  1633. &nr_hugepages_attr.attr,
  1634. &nr_overcommit_hugepages_attr.attr,
  1635. &free_hugepages_attr.attr,
  1636. &resv_hugepages_attr.attr,
  1637. &surplus_hugepages_attr.attr,
  1638. #ifdef CONFIG_NUMA
  1639. &nr_hugepages_mempolicy_attr.attr,
  1640. #endif
  1641. NULL,
  1642. };
  1643. static struct attribute_group hstate_attr_group = {
  1644. .attrs = hstate_attrs,
  1645. };
  1646. static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
  1647. struct kobject **hstate_kobjs,
  1648. struct attribute_group *hstate_attr_group)
  1649. {
  1650. int retval;
  1651. int hi = hstate_index(h);
  1652. hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
  1653. if (!hstate_kobjs[hi])
  1654. return -ENOMEM;
  1655. retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
  1656. if (retval)
  1657. kobject_put(hstate_kobjs[hi]);
  1658. return retval;
  1659. }
  1660. static void __init hugetlb_sysfs_init(void)
  1661. {
  1662. struct hstate *h;
  1663. int err;
  1664. hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
  1665. if (!hugepages_kobj)
  1666. return;
  1667. for_each_hstate(h) {
  1668. err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
  1669. hstate_kobjs, &hstate_attr_group);
  1670. if (err)
  1671. pr_err("Hugetlb: Unable to add hstate %s", h->name);
  1672. }
  1673. }
  1674. #ifdef CONFIG_NUMA
  1675. /*
  1676. * node_hstate/s - associate per node hstate attributes, via their kobjects,
  1677. * with node devices in node_devices[] using a parallel array. The array
  1678. * index of a node device or _hstate == node id.
  1679. * This is here to avoid any static dependency of the node device driver, in
  1680. * the base kernel, on the hugetlb module.
  1681. */
  1682. struct node_hstate {
  1683. struct kobject *hugepages_kobj;
  1684. struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
  1685. };
  1686. struct node_hstate node_hstates[MAX_NUMNODES];
  1687. /*
  1688. * A subset of global hstate attributes for node devices
  1689. */
  1690. static struct attribute *per_node_hstate_attrs[] = {
  1691. &nr_hugepages_attr.attr,
  1692. &free_hugepages_attr.attr,
  1693. &surplus_hugepages_attr.attr,
  1694. NULL,
  1695. };
  1696. static struct attribute_group per_node_hstate_attr_group = {
  1697. .attrs = per_node_hstate_attrs,
  1698. };
  1699. /*
  1700. * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
  1701. * Returns node id via non-NULL nidp.
  1702. */
  1703. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
  1704. {
  1705. int nid;
  1706. for (nid = 0; nid < nr_node_ids; nid++) {
  1707. struct node_hstate *nhs = &node_hstates[nid];
  1708. int i;
  1709. for (i = 0; i < HUGE_MAX_HSTATE; i++)
  1710. if (nhs->hstate_kobjs[i] == kobj) {
  1711. if (nidp)
  1712. *nidp = nid;
  1713. return &hstates[i];
  1714. }
  1715. }
  1716. BUG();
  1717. return NULL;
  1718. }
  1719. /*
  1720. * Unregister hstate attributes from a single node device.
  1721. * No-op if no hstate attributes attached.
  1722. */
  1723. static void hugetlb_unregister_node(struct node *node)
  1724. {
  1725. struct hstate *h;
  1726. struct node_hstate *nhs = &node_hstates[node->dev.id];
  1727. if (!nhs->hugepages_kobj)
  1728. return; /* no hstate attributes */
  1729. for_each_hstate(h) {
  1730. int idx = hstate_index(h);
  1731. if (nhs->hstate_kobjs[idx]) {
  1732. kobject_put(nhs->hstate_kobjs[idx]);
  1733. nhs->hstate_kobjs[idx] = NULL;
  1734. }
  1735. }
  1736. kobject_put(nhs->hugepages_kobj);
  1737. nhs->hugepages_kobj = NULL;
  1738. }
  1739. /*
  1740. * hugetlb module exit: unregister hstate attributes from node devices
  1741. * that have them.
  1742. */
  1743. static void hugetlb_unregister_all_nodes(void)
  1744. {
  1745. int nid;
  1746. /*
  1747. * disable node device registrations.
  1748. */
  1749. register_hugetlbfs_with_node(NULL, NULL);
  1750. /*
  1751. * remove hstate attributes from any nodes that have them.
  1752. */
  1753. for (nid = 0; nid < nr_node_ids; nid++)
  1754. hugetlb_unregister_node(node_devices[nid]);
  1755. }
  1756. /*
  1757. * Register hstate attributes for a single node device.
  1758. * No-op if attributes already registered.
  1759. */
  1760. static void hugetlb_register_node(struct node *node)
  1761. {
  1762. struct hstate *h;
  1763. struct node_hstate *nhs = &node_hstates[node->dev.id];
  1764. int err;
  1765. if (nhs->hugepages_kobj)
  1766. return; /* already allocated */
  1767. nhs->hugepages_kobj = kobject_create_and_add("hugepages",
  1768. &node->dev.kobj);
  1769. if (!nhs->hugepages_kobj)
  1770. return;
  1771. for_each_hstate(h) {
  1772. err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
  1773. nhs->hstate_kobjs,
  1774. &per_node_hstate_attr_group);
  1775. if (err) {
  1776. pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
  1777. h->name, node->dev.id);
  1778. hugetlb_unregister_node(node);
  1779. break;
  1780. }
  1781. }
  1782. }
  1783. /*
  1784. * hugetlb init time: register hstate attributes for all registered node
  1785. * devices of nodes that have memory. All on-line nodes should have
  1786. * registered their associated device by this time.
  1787. */
  1788. static void hugetlb_register_all_nodes(void)
  1789. {
  1790. int nid;
  1791. for_each_node_state(nid, N_MEMORY) {
  1792. struct node *node = node_devices[nid];
  1793. if (node->dev.id == nid)
  1794. hugetlb_register_node(node);
  1795. }
  1796. /*
  1797. * Let the node device driver know we're here so it can
  1798. * [un]register hstate attributes on node hotplug.
  1799. */
  1800. register_hugetlbfs_with_node(hugetlb_register_node,
  1801. hugetlb_unregister_node);
  1802. }
  1803. #else /* !CONFIG_NUMA */
  1804. static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
  1805. {
  1806. BUG();
  1807. if (nidp)
  1808. *nidp = -1;
  1809. return NULL;
  1810. }
  1811. static void hugetlb_unregister_all_nodes(void) { }
  1812. static void hugetlb_register_all_nodes(void) { }
  1813. #endif
  1814. static void __exit hugetlb_exit(void)
  1815. {
  1816. struct hstate *h;
  1817. hugetlb_unregister_all_nodes();
  1818. for_each_hstate(h) {
  1819. kobject_put(hstate_kobjs[hstate_index(h)]);
  1820. }
  1821. kobject_put(hugepages_kobj);
  1822. kfree(htlb_fault_mutex_table);
  1823. }
  1824. module_exit(hugetlb_exit);
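/*
hugetlb initialization: pick the default hstate, preallocate any boot-time
pages, register the sysfs and per-node attributes, and set up the fault mutex
table.
*/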
  1825. static int __init hugetlb_init(void)
  1826. {
  1827. int i;
  1828. if (!hugepages_supported())
  1829. return 0;
  1830. if (!size_to_hstate(default_hstate_size)) {
  1831. default_hstate_size = HPAGE_SIZE;
  1832. if (!size_to_hstate(default_hstate_size))
  1833. hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
  1834. }
  1835. default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
  1836. if (default_hstate_max_huge_pages)
  1837. default_hstate.max_huge_pages = default_hstate_max_huge_pages;
  1838. hugetlb_init_hstates();
  1839. gather_bootmem_prealloc();
  1840. report_hugepages();
  1841. hugetlb_sysfs_init();
  1842. hugetlb_register_all_nodes();
  1843. hugetlb_cgroup_file_init();
  1844. #ifdef CONFIG_SMP
  1845. num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
  1846. #else
  1847. num_fault_mutexes = 1;
  1848. #endif
  1849. htlb_fault_mutex_table =
  1850. kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
  1851. BUG_ON(!htlb_fault_mutex_table);
  1852. for (i = 0; i < num_fault_mutexes; i++)
  1853. mutex_init(&htlb_fault_mutex_table[i]);
  1854. return 0;
  1855. }
  1856. module_init(hugetlb_init);
  1857. /* Should be called on processing a hugepagesz=... option */
  1858. void __init hugetlb_add_hstate(unsigned order)
  1859. {
  1860. struct hstate *h;
  1861. unsigned long i;
  1862. if (size_to_hstate(PAGE_SIZE << order)) {
  1863. pr_warning("hugepagesz= specified twice, ignoring\n");
  1864. return;
  1865. }
  1866. BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
  1867. BUG_ON(order == 0);
  1868. h = &hstates[hugetlb_max_hstate++];
  1869. h->order = order;
  1870. h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
  1871. h->nr_huge_pages = 0;
  1872. h->free_huge_pages = 0;
  1873. for (i = 0; i < MAX_NUMNODES; ++i)
  1874. INIT_LIST_HEAD(&h->hugepage_freelists[i]);
  1875. INIT_LIST_HEAD(&h->hugepage_activelist);
  1876. h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
  1877. h->next_nid_to_free = first_node(node_states[N_MEMORY]);
  1878. snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
  1879. huge_page_size(h)/1024);
  1880. parsed_hstate = h;
  1881. }
  1882. static int __init hugetlb_nrpages_setup(char *s)
  1883. {
  1884. unsigned long *mhp;
  1885. static unsigned long *last_mhp;
  1886. /*
  1887. * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
  1888. * so this hugepages= parameter goes to the "default hstate".
  1889. */
  1890. if (!hugetlb_max_hstate)
  1891. mhp = &default_hstate_max_huge_pages;
  1892. else
  1893. mhp = &parsed_hstate->max_huge_pages;
  1894. if (mhp == last_mhp) {
  1895. pr_warning("hugepages= specified twice without "
  1896. "interleaving hugepagesz=, ignoring\n");
  1897. return 1;
  1898. }
  1899. if (sscanf(s, "%lu", mhp) <= 0)
  1900. *mhp = 0;
  1901. /*
  1902. * Global state is always initialized later in hugetlb_init.
  1903. * But we need to allocate >= MAX_ORDER hstates here early to still
  1904. * use the bootmem allocator.
  1905. */
  1906. if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
  1907. hugetlb_hstate_alloc_pages(parsed_hstate);
  1908. last_mhp = mhp;
  1909. return 1;
  1910. }
  1911. __setup("hugepages=", hugetlb_nrpages_setup);
  1912. static int __init hugetlb_default_setup(char *s)
  1913. {
  1914. default_hstate_size = memparse(s, &s);
  1915. return 1;
  1916. }
  1917. __setup("default_hugepagesz=", hugetlb_default_setup);
  1918. static unsigned int cpuset_mems_nr(unsigned int *array)
  1919. {
  1920. int node;
  1921. unsigned int nr = 0;
  1922. for_each_node_mask(node, cpuset_current_mems_allowed)
  1923. nr += array[node];
  1924. return nr;
  1925. }
  1926. #ifdef CONFIG_SYSCTL
  1927. static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
  1928. struct ctl_table *table, int write,
  1929. void __user *buffer, size_t *length, loff_t *ppos)
  1930. {
  1931. struct hstate *h = &default_hstate;
  1932. unsigned long tmp = h->max_huge_pages;
  1933. int ret;
  1934. if (!hugepages_supported())
  1935. return -ENOTSUPP;
  1936. table->data = &tmp;
  1937. table->maxlen = sizeof(unsigned long);
  1938. ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
  1939. if (ret)
  1940. goto out;
  1941. if (write)
  1942. ret = __nr_hugepages_store_common(obey_mempolicy, h,
  1943. NUMA_NO_NODE, tmp, *length);
  1944. out:
  1945. return ret;
  1946. }
  1947. int hugetlb_sysctl_handler(struct ctl_table *table, int write,
  1948. void __user *buffer, size_t *length, loff_t *ppos)
  1949. {
  1950. return hugetlb_sysctl_handler_common(false, table, write,
  1951. buffer, length, ppos);
  1952. }
  1953. #ifdef CONFIG_NUMA
  1954. int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
  1955. void __user *buffer, size_t *length, loff_t *ppos)
  1956. {
  1957. return hugetlb_sysctl_handler_common(true, table, write,
  1958. buffer, length, ppos);
  1959. }
  1960. #endif /* CONFIG_NUMA */
  1961. int hugetlb_overcommit_handler(struct ctl_table *table, int write,
  1962. void __user *buffer,
  1963. size_t *length, loff_t *ppos)
  1964. {
  1965. struct hstate *h = &default_hstate;
  1966. unsigned long tmp;
  1967. int ret;
  1968. if (!hugepages_supported())
  1969. return -ENOTSUPP;
  1970. tmp = h->nr_overcommit_huge_pages;
  1971. if (write && hstate_is_gigantic(h))
  1972. return -EINVAL;
  1973. table->data = &tmp;
  1974. table->maxlen = sizeof(unsigned long);
  1975. ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
  1976. if (ret)
  1977. goto out;
  1978. if (write) {
  1979. spin_lock(&hugetlb_lock);
  1980. h->nr_overcommit_huge_pages = tmp;
  1981. spin_unlock(&hugetlb_lock);
  1982. }
  1983. out:
  1984. return ret;
  1985. }
  1986. #endif /* CONFIG_SYSCTL */
  1987. void hugetlb_report_meminfo(struct seq_file *m)
  1988. {
  1989. struct hstate *h = &default_hstate;
  1990. if (!hugepages_supported())
  1991. return;
  1992. seq_printf(m,
  1993. "HugePages_Total: %5lu\n"
  1994. "HugePages_Free: %5lu\n"
  1995. "HugePages_Rsvd: %5lu\n"
  1996. "HugePages_Surp: %5lu\n"
  1997. "Hugepagesize: %8lu kB\n",
  1998. h->nr_huge_pages,
  1999. h->free_huge_pages,
  2000. h->resv_huge_pages,
  2001. h->surplus_huge_pages,
  2002. 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
  2003. }
  2004. int hugetlb_report_node_meminfo(int nid, char *buf)
  2005. {
  2006. struct hstate *h = &default_hstate;
  2007. if (!hugepages_supported())
  2008. return 0;
  2009. return sprintf(buf,
  2010. "Node %d HugePages_Total: %5u\n"
  2011. "Node %d HugePages_Free: %5u\n"
  2012. "Node %d HugePages_Surp: %5u\n",
  2013. nid, h->nr_huge_pages_node[nid],
  2014. nid, h->free_huge_pages_node[nid],
  2015. nid, h->surplus_huge_pages_node[nid]);
  2016. }
  2017. void hugetlb_show_meminfo(void)
  2018. {
  2019. struct hstate *h;
  2020. int nid;
  2021. if (!hugepages_supported())
  2022. return;
  2023. for_each_node_state(nid, N_MEMORY)
  2024. for_each_hstate(h)
  2025. pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
  2026. nid,
  2027. h->nr_huge_pages_node[nid],
  2028. h->free_huge_pages_node[nid],
  2029. h->surplus_huge_pages_node[nid],
  2030. 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
  2031. }
2032. /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
  2033. unsigned long hugetlb_total_pages(void)
  2034. {
  2035. struct hstate *h;
  2036. unsigned long nr_total_pages = 0;
  2037. for_each_hstate(h)
  2038. nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
  2039. return nr_total_pages;
  2040. }
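/*
Adjust the global reservation accounting by 'delta' huge pages: gather surplus
pages when a reservation grows, and return unused surplus pages when it
shrinks or the cpuset check below fails.
*/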
  2041. static int hugetlb_acct_memory(struct hstate *h, long delta)
  2042. {
  2043. int ret = -ENOMEM;
  2044. spin_lock(&hugetlb_lock);
  2045. /*
  2046. * When cpuset is configured, it breaks the strict hugetlb page
  2047. * reservation as the accounting is done on a global variable. Such
  2048. * reservation is completely rubbish in the presence of cpuset because
  2049. * the reservation is not checked against page availability for the
2050. current cpuset. Applications can still potentially be OOM'ed by the kernel
2051. for lack of free htlb pages in the cpuset that the task is in.
2052. Attempting to enforce strict accounting with cpuset is almost
2053. impossible (or too ugly) because cpuset is so fluid that
2054. tasks or memory nodes can be dynamically moved between cpusets.
  2055. *
  2056. * The change of semantics for shared hugetlb mapping with cpuset is
  2057. * undesirable. However, in order to preserve some of the semantics,
  2058. * we fall back to check against current free page availability as
  2059. * a best attempt and hopefully to minimize the impact of changing
  2060. * semantics that cpuset has.
  2061. */
  2062. if (delta > 0) {
  2063. if (gather_surplus_pages(h, delta) < 0)
  2064. goto out;
  2065. if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
  2066. return_unused_surplus_pages(h, delta);
  2067. goto out;
  2068. }
  2069. }
  2070. ret = 0;
  2071. if (delta < 0)
  2072. return_unused_surplus_pages(h, (unsigned long) -delta);
  2073. out:
  2074. spin_unlock(&hugetlb_lock);
  2075. return ret;
  2076. }
  2077. static void hugetlb_vm_op_open(struct vm_area_struct *vma)
  2078. {
  2079. struct resv_map *resv = vma_resv_map(vma);
  2080. /*
2081. This new VMA should share its sibling's reservation map if present.
  2082. * The VMA will only ever have a valid reservation map pointer where
  2083. * it is being copied for another still existing VMA. As that VMA
  2084. * has a reference to the reservation map it cannot disappear until
  2085. * after this open call completes. It is therefore safe to take a
  2086. * new reference here without additional locking.
  2087. */
  2088. if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
  2089. kref_get(&resv->refs);
  2090. }
  2091. static void hugetlb_vm_op_close(struct vm_area_struct *vma)
  2092. {
  2093. struct hstate *h = hstate_vma(vma);
  2094. struct resv_map *resv = vma_resv_map(vma);
  2095. struct hugepage_subpool *spool = subpool_vma(vma);
  2096. unsigned long reserve, start, end;
  2097. if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
  2098. return;
  2099. start = vma_hugecache_offset(h, vma, vma->vm_start);
  2100. end = vma_hugecache_offset(h, vma, vma->vm_end);
  2101. reserve = (end - start) - region_count(resv, start, end);
  2102. kref_put(&resv->refs, resv_map_release);
  2103. if (reserve) {
  2104. hugetlb_acct_memory(h, -reserve);
  2105. hugepage_subpool_put_pages(spool, reserve);
  2106. }
  2107. }
  2108. /*
  2109. * We cannot handle pagefaults against hugetlb pages at all. They cause
  2110. * handle_mm_fault() to try to instantiate regular-sized pages in the
2111. hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
  2112. * this far.
  2113. */
  2114. static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
  2115. {
  2116. BUG();
  2117. return 0;
  2118. }
  2119. const struct vm_operations_struct hugetlb_vm_ops = {
  2120. .fault = hugetlb_vm_op_fault,
  2121. .open = hugetlb_vm_op_open,
  2122. .close = hugetlb_vm_op_close,
  2123. };
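/*
Build a huge PTE for the page using the vma's protection bits; writable
mappings get a dirty, writable entry, read-only ones a write-protected entry.
*/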
  2124. static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
  2125. int writable)
  2126. {
  2127. pte_t entry;
  2128. if (writable) {
  2129. entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
  2130. vma->vm_page_prot)));
  2131. } else {
  2132. entry = huge_pte_wrprotect(mk_huge_pte(page,
  2133. vma->vm_page_prot));
  2134. }
  2135. entry = pte_mkyoung(entry);
  2136. entry = pte_mkhuge(entry);
  2137. entry = arch_make_huge_pte(entry, vma, page, writable);
  2138. return entry;
  2139. }
  2140. static void set_huge_ptep_writable(struct vm_area_struct *vma,
  2141. unsigned long address, pte_t *ptep)
  2142. {
  2143. pte_t entry;
  2144. entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
  2145. if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
  2146. update_mmu_cache(vma, address, ptep);
  2147. }
  2148. static int is_hugetlb_entry_migration(pte_t pte)
  2149. {
  2150. swp_entry_t swp;
  2151. if (huge_pte_none(pte) || pte_present(pte))
  2152. return 0;
  2153. swp = pte_to_swp_entry(pte);
  2154. if (non_swap_entry(swp) && is_migration_entry(swp))
  2155. return 1;
  2156. else
  2157. return 0;
  2158. }
  2159. static int is_hugetlb_entry_hwpoisoned(pte_t pte)
  2160. {
  2161. swp_entry_t swp;
  2162. if (huge_pte_none(pte) || pte_present(pte))
  2163. return 0;
  2164. swp = pte_to_swp_entry(pte);
  2165. if (non_swap_entry(swp) && is_hwpoison_entry(swp))
  2166. return 1;
  2167. else
  2168. return 0;
  2169. }
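/*
Copy the huge page tables from the parent to the child at fork time. Shared
page tables are not copied, migration and hwpoison swap entries are
propagated, and for private COW mappings the parent entry is write-protected
before being copied.
*/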
  2170. int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
  2171. struct vm_area_struct *vma)
  2172. {
  2173. pte_t *src_pte, *dst_pte, entry;
  2174. struct page *ptepage;
  2175. unsigned long addr;
  2176. int cow;
  2177. struct hstate *h = hstate_vma(vma);
  2178. unsigned long sz = huge_page_size(h);
  2179. unsigned long mmun_start; /* For mmu_notifiers */
  2180. unsigned long mmun_end; /* For mmu_notifiers */
  2181. int ret = 0;
  2182. cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
  2183. mmun_start = vma->vm_start;
  2184. mmun_end = vma->vm_end;
  2185. if (cow)
  2186. mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
  2187. for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
  2188. spinlock_t *src_ptl, *dst_ptl;
  2189. src_pte = huge_pte_offset(src, addr);
  2190. if (!src_pte)
  2191. continue;
  2192. dst_pte = huge_pte_alloc(dst, addr, sz);
  2193. if (!dst_pte) {
  2194. ret = -ENOMEM;
  2195. break;
  2196. }
  2197. /* If the pagetables are shared don't copy or take references */
  2198. if (dst_pte == src_pte)
  2199. continue;
  2200. dst_ptl = huge_pte_lock(h, dst, dst_pte);
  2201. src_ptl = huge_pte_lockptr(h, src, src_pte);
  2202. spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
  2203. entry = huge_ptep_get(src_pte);
  2204. if (huge_pte_none(entry)) { /* skip none entry */
  2205. ;
  2206. } else if (unlikely(is_hugetlb_entry_migration(entry) ||
  2207. is_hugetlb_entry_hwpoisoned(entry))) {
  2208. swp_entry_t swp_entry = pte_to_swp_entry(entry);
  2209. if (is_write_migration_entry(swp_entry) && cow) {
  2210. /*
  2211. * COW mappings require pages in both
  2212. * parent and child to be set to read.
  2213. */
  2214. make_migration_entry_read(&swp_entry);
  2215. entry = swp_entry_to_pte(swp_entry);
  2216. set_huge_pte_at(src, addr, src_pte, entry);
  2217. }
  2218. set_huge_pte_at(dst, addr, dst_pte, entry);
  2219. } else {
  2220. if (cow)
  2221. huge_ptep_set_wrprotect(src, addr, src_pte);
  2222. entry = huge_ptep_get(src_pte);
  2223. ptepage = pte_page(entry);
  2224. get_page(ptepage);
  2225. page_dup_rmap(ptepage);
  2226. set_huge_pte_at(dst, addr, dst_pte, entry);
  2227. }
  2228. spin_unlock(src_ptl);
  2229. spin_unlock(dst_ptl);
  2230. }
  2231. if (cow)
  2232. mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
  2233. return ret;
  2234. }
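/*
Unmap and free the huge pages mapped by the vma in [start, end). If ref_page
is supplied, only that page is unmapped and the VMA is marked
HPAGE_RESV_UNMAPPED so later faults fail instead of silently losing data.
*/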
  2235. void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
  2236. unsigned long start, unsigned long end,
  2237. struct page *ref_page)
  2238. {
  2239. int force_flush = 0;
  2240. struct mm_struct *mm = vma->vm_mm;
  2241. unsigned long address;
  2242. pte_t *ptep;
  2243. pte_t pte;
  2244. spinlock_t *ptl;
  2245. struct page *page;
  2246. struct hstate *h = hstate_vma(vma);
  2247. unsigned long sz = huge_page_size(h);
  2248. const unsigned long mmun_start = start; /* For mmu_notifiers */
  2249. const unsigned long mmun_end = end; /* For mmu_notifiers */
  2250. WARN_ON(!is_vm_hugetlb_page(vma));
  2251. BUG_ON(start & ~huge_page_mask(h));
  2252. BUG_ON(end & ~huge_page_mask(h));
  2253. tlb_start_vma(tlb, vma);
  2254. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  2255. again:
  2256. for (address = start; address < end; address += sz) {
  2257. ptep = huge_pte_offset(mm, address);
  2258. if (!ptep)
  2259. continue;
  2260. ptl = huge_pte_lock(h, mm, ptep);
  2261. if (huge_pmd_unshare(mm, &address, ptep))
  2262. goto unlock;
  2263. pte = huge_ptep_get(ptep);
  2264. if (huge_pte_none(pte))
  2265. goto unlock;
  2266. /*
2267. A HWPoisoned hugepage has already been unmapped and its reference dropped
  2268. */
  2269. if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
  2270. huge_pte_clear(mm, address, ptep);
  2271. goto unlock;
  2272. }
  2273. page = pte_page(pte);
  2274. /*
  2275. * If a reference page is supplied, it is because a specific
  2276. * page is being unmapped, not a range. Ensure the page we
  2277. * are about to unmap is the actual page of interest.
  2278. */
  2279. if (ref_page) {
  2280. if (page != ref_page)
  2281. goto unlock;
  2282. /*
  2283. * Mark the VMA as having unmapped its page so that
  2284. * future faults in this VMA will fail rather than
  2285. * looking like data was lost
  2286. */
  2287. set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
  2288. }
  2289. pte = huge_ptep_get_and_clear(mm, address, ptep);
  2290. tlb_remove_tlb_entry(tlb, ptep, address);
  2291. if (huge_pte_dirty(pte))
  2292. set_page_dirty(page);
  2293. page_remove_rmap(page);
  2294. force_flush = !__tlb_remove_page(tlb, page);
  2295. if (force_flush) {
  2296. spin_unlock(ptl);
  2297. break;
  2298. }
  2299. /* Bail out after unmapping reference page if supplied */
  2300. if (ref_page) {
  2301. spin_unlock(ptl);
  2302. break;
  2303. }
  2304. unlock:
  2305. spin_unlock(ptl);
  2306. }
  2307. /*
  2308. * mmu_gather ran out of room to batch pages, we break out of
2309. the PTE lock to avoid doing the potentially expensive TLB invalidate
  2310. * and page-free while holding it.
  2311. */
  2312. if (force_flush) {
  2313. force_flush = 0;
  2314. tlb_flush_mmu(tlb);
  2315. if (address < end && !ref_page)
  2316. goto again;
  2317. }
  2318. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  2319. tlb_end_vma(tlb, vma);
  2320. }
  2321. void __unmap_hugepage_range_final(struct mmu_gather *tlb,
  2322. struct vm_area_struct *vma, unsigned long start,
  2323. unsigned long end, struct page *ref_page)
  2324. {
  2325. __unmap_hugepage_range(tlb, vma, start, end, ref_page);
  2326. /*
  2327. * Clear this flag so that x86's huge_pmd_share page_table_shareable
  2328. * test will fail on a vma being torn down, and not grab a page table
  2329. * on its way out. We're lucky that the flag has such an appropriate
  2330. * name, and can in fact be safely cleared here. We could clear it
  2331. * before the __unmap_hugepage_range above, but all that's necessary
  2332. * is to clear it before releasing the i_mmap_mutex. This works
  2333. * because in the context this is called, the VMA is about to be
  2334. * destroyed and the i_mmap_mutex is held.
  2335. */
  2336. vma->vm_flags &= ~VM_MAYSHARE;
  2337. }
  2338. void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
  2339. unsigned long end, struct page *ref_page)
  2340. {
  2341. struct mm_struct *mm;
  2342. struct mmu_gather tlb;
  2343. mm = vma->vm_mm;
  2344. tlb_gather_mmu(&tlb, mm, start, end);
  2345. __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
  2346. tlb_finish_mmu(&tlb, start, end);
  2347. }
  2348. /*
  2349. * This is called when the original mapper is failing to COW a MAP_PRIVATE
2350. mapping it owns the reserve page for. The intention is to unmap the page
  2351. * from other VMAs and let the children be SIGKILLed if they are faulting the
  2352. * same region.
  2353. */
  2354. static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
  2355. struct page *page, unsigned long address)
  2356. {
  2357. struct hstate *h = hstate_vma(vma);
  2358. struct vm_area_struct *iter_vma;
  2359. struct address_space *mapping;
  2360. pgoff_t pgoff;
  2361. /*
  2362. * vm_pgoff is in PAGE_SIZE units, hence the different calculation
  2363. * from page cache lookup which is in HPAGE_SIZE units.
  2364. */
  2365. address = address & huge_page_mask(h);
  2366. pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
  2367. vma->vm_pgoff;
  2368. mapping = file_inode(vma->vm_file)->i_mapping;
  2369. /*
  2370. * Take the mapping lock for the duration of the table walk. As
  2371. * this mapping should be shared between all the VMAs,
2372. __unmap_hugepage_range() is called with the lock already held.
  2373. */
  2374. mutex_lock(&mapping->i_mmap_mutex);
  2375. vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
  2376. /* Do not unmap the current VMA */
  2377. if (iter_vma == vma)
  2378. continue;
  2379. /*
  2380. * Unmap the page from other VMAs without their own reserves.
  2381. * They get marked to be SIGKILLed if they fault in these
  2382. * areas. This is because a future no-page fault on this VMA
  2383. * could insert a zeroed page instead of the data existing
  2384. * from the time of fork. This would look like data corruption
  2385. */
  2386. if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
  2387. unmap_hugepage_range(iter_vma, address,
  2388. address + huge_page_size(h), page);
  2389. }
  2390. mutex_unlock(&mapping->i_mmap_mutex);
  2391. }
  2392. /*
  2393. * Hugetlb_cow() should be called with page lock of the original hugepage held.
2394. Called with the hugetlb fault mutex held and pte_page locked so we
  2395. * cannot race with other handlers or page migration.
  2396. * Keep the pte_same checks anyway to make transition from the mutex easier.
  2397. */
  2398. static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
  2399. unsigned long address, pte_t *ptep, pte_t pte,
  2400. struct page *pagecache_page, spinlock_t *ptl)
  2401. {
  2402. struct hstate *h = hstate_vma(vma);
  2403. struct page *old_page, *new_page;
  2404. int ret = 0, outside_reserve = 0;
  2405. unsigned long mmun_start; /* For mmu_notifiers */
  2406. unsigned long mmun_end; /* For mmu_notifiers */
  2407. old_page = pte_page(pte);
  2408. retry_avoidcopy:
  2409. /* If no-one else is actually using this page, avoid the copy
  2410. * and just make the page writable */
  2411. if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
  2412. page_move_anon_rmap(old_page, vma, address);
  2413. set_huge_ptep_writable(vma, address, ptep);
  2414. return 0;
  2415. }
  2416. /*
  2417. * If the process that created a MAP_PRIVATE mapping is about to
  2418. * perform a COW due to a shared page count, attempt to satisfy
  2419. * the allocation without using the existing reserves. The pagecache
  2420. * page is used to determine if the reserve at this address was
  2421. * consumed or not. If reserves were used, a partial faulted mapping
  2422. * at the time of fork() could consume its reserves on COW instead
  2423. * of the full address range.
  2424. */
  2425. if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
  2426. old_page != pagecache_page)
  2427. outside_reserve = 1;
  2428. page_cache_get(old_page);
  2429. /*
  2430. * Drop page table lock as buddy allocator may be called. It will
  2431. * be acquired again before returning to the caller, as expected.
  2432. */
  2433. spin_unlock(ptl);
  2434. new_page = alloc_huge_page(vma, address, outside_reserve);
  2435. if (IS_ERR(new_page)) {
  2436. /*
  2437. * If a process owning a MAP_PRIVATE mapping fails to COW,
  2438. * it is due to references held by a child and an insufficient
2439. huge page pool. To guarantee the original mapper's
  2440. * reliability, unmap the page from child processes. The child
  2441. * may get SIGKILLed if it later faults.
  2442. */
  2443. if (outside_reserve) {
  2444. page_cache_release(old_page);
  2445. BUG_ON(huge_pte_none(pte));
  2446. unmap_ref_private(mm, vma, old_page, address);
  2447. BUG_ON(huge_pte_none(pte));
  2448. spin_lock(ptl);
  2449. ptep = huge_pte_offset(mm, address & huge_page_mask(h));
  2450. if (likely(ptep &&
  2451. pte_same(huge_ptep_get(ptep), pte)))
  2452. goto retry_avoidcopy;
  2453. /*
2454. A race occurred while re-acquiring the page table
2455. lock, and our job is done.
  2456. */
  2457. return 0;
  2458. }
  2459. ret = (PTR_ERR(new_page) == -ENOMEM) ?
  2460. VM_FAULT_OOM : VM_FAULT_SIGBUS;
  2461. goto out_release_old;
  2462. }
  2463. /*
2464. When the original hugepage is a shared one, it does not have
2465. an anon_vma prepared.
  2466. */
  2467. if (unlikely(anon_vma_prepare(vma))) {
  2468. ret = VM_FAULT_OOM;
  2469. goto out_release_all;
  2470. }
  2471. copy_user_huge_page(new_page, old_page, address, vma,
  2472. pages_per_huge_page(h));
  2473. __SetPageUptodate(new_page);
  2474. mmun_start = address & huge_page_mask(h);
  2475. mmun_end = mmun_start + huge_page_size(h);
  2476. mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
  2477. /*
  2478. * Retake the page table lock to check for racing updates
  2479. * before the page tables are altered
  2480. */
  2481. spin_lock(ptl);
  2482. ptep = huge_pte_offset(mm, address & huge_page_mask(h));
  2483. if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
  2484. ClearPagePrivate(new_page);
  2485. /* Break COW */
  2486. huge_ptep_clear_flush(vma, address, ptep);
  2487. set_huge_pte_at(mm, address, ptep,
  2488. make_huge_pte(vma, new_page, 1));
  2489. page_remove_rmap(old_page);
  2490. hugepage_add_new_anon_rmap(new_page, vma, address);
  2491. /* Make the old page be freed below */
  2492. new_page = old_page;
  2493. }
  2494. spin_unlock(ptl);
  2495. mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
  2496. out_release_all:
  2497. page_cache_release(new_page);
  2498. out_release_old:
  2499. page_cache_release(old_page);
  2500. spin_lock(ptl); /* Caller expects lock to be held */
  2501. return ret;
  2502. }
  2503. /* Return the pagecache page at a given address within a VMA */
  2504. static struct page *hugetlbfs_pagecache_page(struct hstate *h,
  2505. struct vm_area_struct *vma, unsigned long address)
  2506. {
  2507. struct address_space *mapping;
  2508. pgoff_t idx;
  2509. mapping = vma->vm_file->f_mapping;
  2510. idx = vma_hugecache_offset(h, vma, address);
  2511. return find_lock_page(mapping, idx);
  2512. }
  2513. /*
  2514. * Return whether there is a pagecache page to back given address within VMA.
  2515. * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
  2516. */
  2517. static bool hugetlbfs_pagecache_present(struct hstate *h,
  2518. struct vm_area_struct *vma, unsigned long address)
  2519. {
  2520. struct address_space *mapping;
  2521. pgoff_t idx;
  2522. struct page *page;
  2523. mapping = vma->vm_file->f_mapping;
  2524. idx = vma_hugecache_offset(h, vma, address);
  2525. page = find_get_page(mapping, idx);
  2526. if (page)
  2527. put_page(page);
  2528. return page != NULL;
  2529. }
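/*
Handle a fault on a huge PTE that is currently none: find the page in the
page cache, or allocate a new one (added to the page cache for shared
mappings, anonymous otherwise), then install the PTE, doing the COW early for
private write faults.
*/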
  2530. static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
  2531. struct address_space *mapping, pgoff_t idx,
  2532. unsigned long address, pte_t *ptep, unsigned int flags)
  2533. {
  2534. struct hstate *h = hstate_vma(vma);
  2535. int ret = VM_FAULT_SIGBUS;
  2536. int anon_rmap = 0;
  2537. unsigned long size;
  2538. struct page *page;
  2539. pte_t new_pte;
  2540. spinlock_t *ptl;
  2541. /*
  2542. * Currently, we are forced to kill the process in the event the
  2543. * original mapper has unmapped pages from the child due to a failed
  2544. * COW. Warn that such a situation has occurred as it may not be obvious
  2545. */
  2546. if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
  2547. pr_warning("PID %d killed due to inadequate hugepage pool\n",
  2548. current->pid);
  2549. return ret;
  2550. }
  2551. /*
  2552. * Use page lock to guard against racing truncation
  2553. * before we get page_table_lock.
  2554. */
  2555. retry:
  2556. page = find_lock_page(mapping, idx);
  2557. if (!page) {
  2558. size = i_size_read(mapping->host) >> huge_page_shift(h);
  2559. if (idx >= size)
  2560. goto out;
  2561. page = alloc_huge_page(vma, address, 0);
  2562. if (IS_ERR(page)) {
  2563. ret = PTR_ERR(page);
  2564. if (ret == -ENOMEM)
  2565. ret = VM_FAULT_OOM;
  2566. else
  2567. ret = VM_FAULT_SIGBUS;
  2568. goto out;
  2569. }
  2570. clear_huge_page(page, address, pages_per_huge_page(h));
  2571. __SetPageUptodate(page);
  2572. if (vma->vm_flags & VM_MAYSHARE) {
  2573. int err;
  2574. struct inode *inode = mapping->host;
  2575. err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
  2576. if (err) {
  2577. put_page(page);
  2578. if (err == -EEXIST)
  2579. goto retry;
  2580. goto out;
  2581. }
  2582. ClearPagePrivate(page);
  2583. spin_lock(&inode->i_lock);
  2584. inode->i_blocks += blocks_per_huge_page(h);
  2585. spin_unlock(&inode->i_lock);
  2586. } else {
  2587. lock_page(page);
  2588. if (unlikely(anon_vma_prepare(vma))) {
  2589. ret = VM_FAULT_OOM;
  2590. goto backout_unlocked;
  2591. }
  2592. anon_rmap = 1;
  2593. }
  2594. } else {
  2595. /*
2596. If a memory error occurs between mmap() and fault, some processes
2597. don't have a hwpoisoned swap entry for the errored virtual address.
  2598. * So we need to block hugepage fault by PG_hwpoison bit check.
  2599. */
  2600. if (unlikely(PageHWPoison(page))) {
  2601. ret = VM_FAULT_HWPOISON |
  2602. VM_FAULT_SET_HINDEX(hstate_index(h));
  2603. goto backout_unlocked;
  2604. }
  2605. }
  2606. /*
  2607. * If we are going to COW a private mapping later, we examine the
  2608. * pending reservations for this page now. This will ensure that
  2609. * any allocations necessary to record that reservation occur outside
  2610. * the spinlock.
  2611. */
  2612. if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
  2613. if (vma_needs_reservation(h, vma, address) < 0) {
  2614. ret = VM_FAULT_OOM;
  2615. goto backout_unlocked;
  2616. }
  2617. ptl = huge_pte_lockptr(h, mm, ptep);
  2618. spin_lock(ptl);
  2619. size = i_size_read(mapping->host) >> huge_page_shift(h);
  2620. if (idx >= size)
  2621. goto backout;
  2622. ret = 0;
  2623. if (!huge_pte_none(huge_ptep_get(ptep)))
  2624. goto backout;
  2625. if (anon_rmap) {
  2626. ClearPagePrivate(page);
  2627. hugepage_add_new_anon_rmap(page, vma, address);
  2628. } else
  2629. page_dup_rmap(page);
  2630. new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
  2631. && (vma->vm_flags & VM_SHARED)));
  2632. set_huge_pte_at(mm, address, ptep, new_pte);
  2633. if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
  2634. /* Optimization, do the COW without a second fault */
  2635. ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
  2636. }
  2637. spin_unlock(ptl);
  2638. unlock_page(page);
  2639. out:
  2640. return ret;
  2641. backout:
  2642. spin_unlock(ptl);
  2643. backout_unlocked:
  2644. unlock_page(page);
  2645. put_page(page);
  2646. goto out;
  2647. }
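/*
Pick a fault mutex for the faulting page: shared mappings hash on (mapping,
index), private ones on (mm, address), so concurrent faults on the same huge
page serialize on the same mutex.
*/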
  2648. #ifdef CONFIG_SMP
  2649. static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
  2650. struct vm_area_struct *vma,
  2651. struct address_space *mapping,
  2652. pgoff_t idx, unsigned long address)
  2653. {
  2654. unsigned long key[2];
  2655. u32 hash;
  2656. if (vma->vm_flags & VM_SHARED) {
  2657. key[0] = (unsigned long) mapping;
  2658. key[1] = idx;
  2659. } else {
  2660. key[0] = (unsigned long) mm;
  2661. key[1] = address >> huge_page_shift(h);
  2662. }
  2663. hash = jhash2((u32 *)&key, sizeof(key)/sizeof(u32), 0);
  2664. return hash & (num_fault_mutexes - 1);
  2665. }
  2666. #else
  2667. /*
2668. For uniprocessor systems we always use a single mutex, so just
  2669. * return 0 and avoid the hashing overhead.
  2670. */
  2671. static u32 fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
  2672. struct vm_area_struct *vma,
  2673. struct address_space *mapping,
  2674. pgoff_t idx, unsigned long address)
  2675. {
  2676. return 0;
  2677. }
  2678. #endif
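/*
Top-level hugetlb fault handler: waits on migration entries, reports hwpoison,
serializes against concurrent faults on the same page via
htlb_fault_mutex_table, and hands off to hugetlb_no_page() or hugetlb_cow()
as needed.
*/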
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{
	pte_t *ptep, entry;
	spinlock_t *ptl;
	int ret;
	u32 hash;
	pgoff_t idx;
	struct page *page = NULL;
	struct page *pagecache_page = NULL;
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping;

	address &= huge_page_mask(h);

	ptep = huge_pte_offset(mm, address);
	if (ptep) {
		entry = huge_ptep_get(ptep);
		if (unlikely(is_hugetlb_entry_migration(entry))) {
			migration_entry_wait_huge(vma, mm, ptep);
			return 0;
		} else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
			return VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
	}

	ptep = huge_pte_alloc(mm, address, huge_page_size(h));
	if (!ptep)
		return VM_FAULT_OOM;

	mapping = vma->vm_file->f_mapping;
	idx = vma_hugecache_offset(h, vma, address);

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	hash = fault_mutex_hash(h, mm, vma, mapping, idx, address);
	mutex_lock(&htlb_fault_mutex_table[hash]);

	entry = huge_ptep_get(ptep);
	if (huge_pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
		goto out_mutex;
	}

	ret = 0;

	/*
	 * If we are going to COW the mapping later, we examine the pending
	 * reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock. For private mappings, we also look up the pagecache
	 * page now as it is used to determine if a reservation has been
	 * consumed.
	 */
	if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
		if (vma_needs_reservation(h, vma, address) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}

		if (!(vma->vm_flags & VM_MAYSHARE))
			pagecache_page = hugetlbfs_pagecache_page(h,
								vma, address);
	}

	/*
	 * hugetlb_cow() requires page locks of pte_page(entry) and
	 * pagecache_page, so here we need to take the former one
	 * when page != pagecache_page or !pagecache_page.
	 * Note that locking order is always pagecache_page -> page,
	 * so no worry about deadlock.
	 */
	page = pte_page(entry);
	get_page(page);
	if (page != pagecache_page)
		lock_page(page);

	ptl = huge_pte_lockptr(h, mm, ptep);
	spin_lock(ptl);
	/* Check for a racing update before calling hugetlb_cow */
	if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
		goto out_ptl;

	if (flags & FAULT_FLAG_WRITE) {
		if (!huge_pte_write(entry)) {
			ret = hugetlb_cow(mm, vma, address, ptep, entry,
					pagecache_page, ptl);
			goto out_ptl;
		}
		entry = huge_pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, ptep);

out_ptl:
	spin_unlock(ptl);

	if (pagecache_page) {
		unlock_page(pagecache_page);
		put_page(pagecache_page);
	}
	if (page != pagecache_page)
		unlock_page(page);
	put_page(page);

out_mutex:
	mutex_unlock(&htlb_fault_mutex_table[hash]);
	return ret;
}
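
/*
 * Back end of get_user_pages() for hugetlb VMAs: walk the range starting at
 * *position one base page at a time, faulting in missing or write-protected
 * hugepages as needed, and fill pages[]/vmas[] from slot i onwards. On
 * return, *nr_pages holds the number of base pages still to do and
 * *position the address to resume from.
 */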
long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page **pages, struct vm_area_struct **vmas,
			 unsigned long *position, unsigned long *nr_pages,
			 long i, unsigned int flags)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	unsigned long remainder = *nr_pages;
	struct hstate *h = hstate_vma(vma);

	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		spinlock_t *ptl = NULL;
		int absent;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage. We have to make sure we get the
		 * first, for the page indexing below to work.
		 *
		 * Note that page table lock is not held when pte is null.
		 */
		pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
		if (pte)
			ptl = huge_pte_lock(h, mm, pte);
		absent = !pte || huge_pte_none(huge_ptep_get(pte));

		/*
		 * When coredumping, it suits get_dump_page if we just return
		 * an error where there's an empty slot with no huge pagecache
		 * to back it. This way, we avoid allocating a hugepage, and
		 * the sparse dumpfile avoids allocating disk blocks, but its
		 * huge holes still show up with zeroes where they need to be.
		 */
		if (absent && (flags & FOLL_DUMP) &&
		    !hugetlbfs_pagecache_present(h, vma, vaddr)) {
			if (pte)
				spin_unlock(ptl);
			remainder = 0;
			break;
		}

		/*
		 * We need to call hugetlb_fault() both for hugepages under
		 * migration (in which case hugetlb_fault() waits for the
		 * migration to finish) and for hwpoisoned hugepages (in which
		 * case we must keep the caller from accessing them). We check
		 * is_swap_pte() here instead of is_hugetlb_entry_migration()
		 * and is_hugetlb_entry_hwpoisoned() because it covers both
		 * cases, and because we can't follow correct pages directly
		 * from any kind of swap entry.
		 */
		if (absent || is_swap_pte(huge_ptep_get(pte)) ||
		    ((flags & FOLL_WRITE) &&
		      !huge_pte_write(huge_ptep_get(pte)))) {
			int ret;

			if (pte)
				spin_unlock(ptl);
			ret = hugetlb_fault(mm, vma, vaddr,
				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
			if (!(ret & VM_FAULT_ERROR))
				continue;

			remainder = 0;
			break;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));
same_page:
		if (pages) {
			pages[i] = mem_map_offset(page, pfn_offset);
			get_page_foll(pages[i]);
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < pages_per_huge_page(h)) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
		spin_unlock(ptl);
	}
	*nr_pages = remainder;
	*position = vaddr;

	return i ? i : -EFAULT;
}
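
/*
 * Change the protection of every present hugetlb entry in [address, end) to
 * @newprot, unsharing any shared pmds encountered on the way. Returns the
 * number of base pages affected, i.e. pages << h->order.
 */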
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	unsigned long pages = 0;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	mmu_notifier_invalidate_range_start(mm, start, end);
	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
	for (; address < end; address += huge_page_size(h)) {
		spinlock_t *ptl;
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(mm, &address, ptep)) {
			pages++;
			spin_unlock(ptl);
			continue;
		}
		if (!huge_pte_none(huge_ptep_get(ptep))) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(huge_pte_modify(pte, newprot));
			pte = arch_make_huge_pte(pte, vma, NULL, 0);
			set_huge_pte_at(mm, address, ptep, pte);
			pages++;
		}
		spin_unlock(ptl);
	}
	/*
	 * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
	 * may have cleared our pud entry and done put_page on the page table:
	 * once we release i_mmap_mutex, another task can do the final put_page
	 * and that page table be reused and filled with junk.
	 */
	flush_tlb_range(vma, start, end);
	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
	mmu_notifier_invalidate_range_end(mm, start, end);

	return pages << h->order;
}
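
/*
 * Reserve huge pages for the range [from, to) of the hugetlbfs file backing
 * @inode. Shared mappings charge only the pages not already covered by the
 * inode's region map; private mappings get their own resv_map and charge the
 * whole range. The charge is taken from the subpool and the global pool, and
 * is rolled back if either step fails.
 */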
int hugetlb_reserve_pages(struct inode *inode,
					long from, long to,
					struct vm_area_struct *vma,
					vm_flags_t vm_flags)
{
	long ret, chg;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct resv_map *resv_map;

	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves
	 */
	if (vm_flags & VM_NORESERVE)
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !vma is a shm mapping
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE) {
		resv_map = inode_resv_map(inode);

		chg = region_chg(resv_map, from, to);

	} else {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return -ENOMEM;

		chg = to - from;

		set_vma_resv_map(vma, resv_map);
		set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
	}

	if (chg < 0) {
		ret = chg;
		goto out_err;
	}

	/* There must be enough pages in the subpool for the mapping */
	if (hugepage_subpool_get_pages(spool, chg)) {
		ret = -ENOSPC;
		goto out_err;
	}

	/*
	 * Check enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not
	 */
	ret = hugetlb_acct_memory(h, chg);
	if (ret < 0) {
		hugepage_subpool_put_pages(spool, chg);
		goto out_err;
	}

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here
	 */
	if (!vma || vma->vm_flags & VM_MAYSHARE)
		region_add(resv_map, from, to);
	return 0;
out_err:
	if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		kref_put(&resv_map->refs, resv_map_release);
	return ret;
}
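
/*
 * Undo file-backed reservations when a hugetlbfs file is truncated: drop
 * region map entries from @offset onwards, adjust the inode block count for
 * the @freed pages that were actually in the page cache, and return the
 * unused remainder of the charge to the subpool and the global pool.
 */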
void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);

	if (resv_map)
		chg = region_truncate(resv_map, offset);
	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -(chg - freed));
}

#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
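/*
 * Compute the address in @svma that maps the same file page as @addr does in
 * @vma, and decide whether the two VMAs can share the page table page
 * covering that PUD-sized range: the pmd alignment and vm_flags (ignoring
 * VM_LOCKED) must match, and @svma must span the whole range. Returns the
 * address in @svma, or 0 if sharing is not possible.
 */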
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;
	unsigned long s_end = sbase + PUD_SIZE;

	/* Allow segments to share if only one is marked locked */
	unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
	unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;

	/*
	 * match the virtual addresses, permission and the alignment of the
	 * page table page.
	 */
	if (pmd_index(addr) != pmd_index(saddr) ||
	    vm_flags != svm_flags ||
	    sbase < svma->vm_start || svma->vm_end < s_end)
		return 0;

	return saddr;
}
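
/*
 * A page table page can only be shared for @addr if the VMA is a shareable
 * mapping that fully covers the PUD-sized, PUD-aligned range containing
 * @addr.
 */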
static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
{
	unsigned long base = addr & PUD_MASK;
	unsigned long end = base + PUD_SIZE;

	/*
	 * check on proper vm_flags and page table alignment
	 */
	if (vma->vm_flags & VM_MAYSHARE &&
	    vma->vm_start <= base && end <= vma->vm_end)
		return 1;
	return 0;
}

/*
 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
 * and returns the corresponding pte. While this is not necessary for the
 * !shared pmd case because we can allocate the pmd later as well, it makes the
 * code much cleaner. pmd allocation is essential for the shared case because
 * pud has to be populated inside the same i_mmap_mutex section - otherwise
 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
 * bad pmd for sharing.
 */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	struct vm_area_struct *vma = find_vma(mm, addr);
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;
	spinlock_t *ptl;

	if (!vma_shareable(vma, addr))
		return (pte_t *)pmd_alloc(mm, pud, addr);

	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = huge_pte_offset(svma->vm_mm, saddr);
			if (spte) {
				get_page(virt_to_page(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
	spin_lock(ptl);
	if (pud_none(*pud))
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
	else
		put_page(virt_to_page(spte));
	spin_unlock(ptl);
out:
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	mutex_unlock(&mapping->i_mmap_mutex);
	return pte;
}

/*
 * unmap huge page backed by shared pte.
 *
 * Hugetlb pte page is ref counted at the time of mapping. If the pte is
 * shared (indicated by page_count > 1), unmap is achieved by clearing pud
 * and decrementing the ref count. If count == 1, the pte page is not shared.
 *
 * called with page table lock held.
 *
 * returns: 1 successfully unmapped a shared pte page
 *	    0 the underlying pte page is not shared, or it is the last user
 */
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	pgd_t *pgd = pgd_offset(mm, *addr);
	pud_t *pud = pud_offset(pgd, *addr);

	BUG_ON(page_count(virt_to_page(ptep)) == 0);
	if (page_count(virt_to_page(ptep)) == 1)
		return 0;

	pud_clear(pud);
	put_page(virt_to_page(ptep));
	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
	return 1;
}
#define want_pmd_share()	(1)
#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
{
	return NULL;
}
#define want_pmd_share()	(0)
#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
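/*
 * Generic hugetlb page table allocation: for a PUD_SIZE hugepage the
 * (possibly freshly allocated) pud slot itself serves as the "pte"; for a
 * PMD_SIZE hugepage a pmd is allocated, shared via huge_pmd_share() when the
 * architecture allows it and the pud is still empty.
 */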
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			if (want_pmd_share() && pud_none(*pud))
				pte = huge_pmd_share(mm, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}
	BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));

	return pte;
}
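
/*
 * Look up the page table entry mapping a hugepage at @addr without
 * allocating anything: returns the pud for a PUD-sized hugepage, the pmd
 * otherwise, or NULL if no table is present at that level.
 */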
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (pud_present(*pud)) {
			if (pud_huge(*pud))
				return (pte_t *)pud;
			pmd = pmd_offset(pud, addr);
		}
	}
	return (pte_t *) pmd;
}
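
/*
 * Helpers for the page walker (e.g. follow_page()): convert a huge pmd/pud
 * leaf entry into the struct page of the base page covering @address within
 * the hugepage.
 */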
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pmd);
	if (page)
		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
	return page;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	struct page *page;

	page = pte_page(*(pte_t *)pud);
	if (page)
		page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
	return page;
}

#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */

/* Can be overridden by architectures */
struct page * __weak
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	BUG();
	return NULL;
}

#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */

#ifdef CONFIG_MEMORY_FAILURE

/* Should be called with hugetlb_lock held */
static int is_hugepage_on_freelist(struct page *hpage)
{
	struct page *page;
	struct page *tmp;
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);

	list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
		if (page == hpage)
			return 1;
	return 0;
}

/*
 * This function is called from memory failure code.
 * Assume the caller holds page lock of the head page.
 */
int dequeue_hwpoisoned_huge_page(struct page *hpage)
{
	struct hstate *h = page_hstate(hpage);
	int nid = page_to_nid(hpage);
	int ret = -EBUSY;

	spin_lock(&hugetlb_lock);
	if (is_hugepage_on_freelist(hpage)) {
		/*
		 * Hwpoisoned hugepage isn't linked to activelist or freelist,
		 * but dangling hpage->lru can trigger list-debug warnings
		 * (this happens when we call unpoison_memory() on it),
		 * so let it point to itself with list_del_init().
		 */
		list_del_init(&hpage->lru);
		set_page_refcounted(hpage);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}
#endif
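
/*
 * Detach a hugepage from its hstate list and move it onto the caller's
 * @list (e.g. for migration). A reference is taken with
 * get_page_unless_zero() so the page cannot be freed while it sits on
 * @list.
 */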
bool isolate_huge_page(struct page *page, struct list_head *list)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	if (!get_page_unless_zero(page))
		return false;
	spin_lock(&hugetlb_lock);
	list_move_tail(&page->lru, list);
	spin_unlock(&hugetlb_lock);
	return true;
}
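
/*
 * Return an isolated hugepage to its hstate's active list and drop the
 * reference taken by isolate_huge_page().
 */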
void putback_active_hugepage(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	spin_lock(&hugetlb_lock);
	list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
	spin_unlock(&hugetlb_lock);
	put_page(page);
}

bool is_hugepage_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	/*
	 * This function can be called for a tail page because the caller,
	 * scan_movable_pages, scans through a given pfn-range which typically
	 * covers one memory block. In systems using gigantic hugepages (1GB
	 * on x86_64), a hugepage is larger than a memory block, and we don't
	 * support migrating such large hugepages for now, so return false
	 * when called for tail pages.
	 */
	if (PageTail(page))
		return false;
	/*
	 * The refcount of a hwpoisoned hugepage is 1, but such pages are not
	 * active, so we should return false for them.
	 */
	if (unlikely(PageHWPoison(page)))
		return false;
	return page_count(page) > 0;
}