/*
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/shrinker.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/hashtable.h>

#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"

/*
 * By default, transparent hugepage support is disabled in order to avoid
 * risking an increased memory footprint for applications without a
 * guaranteed benefit. When transparent hugepage support is enabled, it is
 * used for all mappings, and khugepaged scans all mappings.
 * Defrag is invoked by khugepaged hugepage allocations and by page faults
 * for all hugepage allocations.
 */
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
	(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
	(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * By default, collapse into a hugepage if at least one pte is mapped,
 * just as would have happened had the vma been large enough at
 * page-fault time.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;

static int khugepaged(void *none);
static int khugepaged_slab_init(void);

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;

/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};

/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that mm to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};
static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};

static int set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	if (!khugepaged_enabled())
		return 0;

	for_each_populated_zone(zone)
		nr_zones++;

	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type: one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;

	/* never allow more than 5% of the lowmem to be reserved */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
	return 0;
}
late_initcall(set_recommended_min_free_kbytes);

static int start_khugepaged(void)
{
	int err = 0;

	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (unlikely(IS_ERR(khugepaged_thread))) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}

	return err;
}

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static struct page *get_huge_zero_page(void)
{
	struct page *zero_page;
retry:
	if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
		return ACCESS_ONCE(huge_zero_page);

	zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
			HPAGE_PMD_ORDER);
	if (!zero_page) {
		count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
		return NULL;
	}
	count_vm_event(THP_ZERO_PAGE_ALLOC);
	preempt_disable();
	if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
		preempt_enable();
		__free_pages(zero_page, compound_order(zero_page));
		goto retry;
	}

	/* We take an additional reference here. It will be put back by the shrinker */
	atomic_set(&huge_zero_refcount, 2);
	preempt_enable();
	return ACCESS_ONCE(huge_zero_page);
}

static void put_huge_zero_page(void)
{
	/*
	 * The counter should never go to zero here. Only the shrinker can
	 * put the last reference.
	 */
	BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
}

static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	/* we can free the zero page only if the last reference remains */
	return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
}

static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
		struct page *zero_page = xchg(&huge_zero_page, NULL);
		BUG_ON(zero_page == NULL);
		__free_pages(zero_page, compound_order(zero_page));
		return HPAGE_PMD_NR;
	}

	return 0;
}

static struct shrinker huge_zero_page_shrinker = {
	.count_objects = shrink_huge_zero_page_count,
	.scan_objects = shrink_huge_zero_page_scan,
	.seeks = DEFAULT_SEEKS,
};
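
/*
 * Summary of the huge zero page lifecycle implemented above:
 * atomic_inc_not_zero() in get_huge_zero_page() takes a reference on the
 * fast path; on first allocation the refcount is set to 2 - one reference
 * for the caller and one "cached" reference that is only dropped by the
 * shrinker. A refcount of 1 therefore means no user is left, which is why
 * shrink_huge_zero_page_scan() can free the page race-free with a single
 * atomic_cmpxchg(&huge_zero_refcount, 1, 0).
 */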

#ifdef CONFIG_SYSFS

static ssize_t double_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag enabled,
				enum transparent_hugepage_flag req_madv)
{
	if (test_bit(enabled, &transparent_hugepage_flags)) {
		VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
		return sprintf(buf, "[always] madvise never\n");
	} else if (test_bit(req_madv, &transparent_hugepage_flags))
		return sprintf(buf, "always [madvise] never\n");
	else
		return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag enabled,
				 enum transparent_hugepage_flag req_madv)
{
	if (!memcmp("always", buf,
		    min(sizeof("always")-1, count))) {
		set_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("madvise", buf,
			   min(sizeof("madvise")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		set_bit(req_madv, &transparent_hugepage_flags);
	} else if (!memcmp("never", buf,
			   min(sizeof("never")-1, count))) {
		clear_bit(enabled, &transparent_hugepage_flags);
		clear_bit(req_madv, &transparent_hugepage_flags);
	} else
		return -EINVAL;

	return count;
}
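
/*
 * Note that the comparisons above are prefix matches: only
 * min(sizeof("always")-1, count) bytes are compared, so trailing
 * characters such as the newline appended by `echo always > enabled`
 * are deliberately ignored.
 */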

static ssize_t enabled_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
			     struct kobj_attribute *attr,
			     const char *buf, size_t count)
{
	ssize_t ret;

	ret = double_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_FLAG,
				TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);

	if (ret > 0) {
		int err;

		mutex_lock(&khugepaged_mutex);
		err = start_khugepaged();
		mutex_unlock(&khugepaged_mutex);

		if (err)
			ret = err;
	}

	return ret;
}
static struct kobj_attribute enabled_attr =
	__ATTR(enabled, 0644, enabled_show, enabled_store);

static ssize_t single_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag)
{
	return sprintf(buf, "%d\n",
		       !!test_bit(flag, &transparent_hugepage_flags));
}

static ssize_t single_flag_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t count,
				 enum transparent_hugepage_flag flag)
{
	unsigned long value;
	int ret;

	ret = kstrtoul(buf, 10, &value);
	if (ret < 0)
		return ret;
	if (value > 1)
		return -EINVAL;

	if (value)
		set_bit(flag, &transparent_hugepage_flags);
	else
		clear_bit(flag, &transparent_hugepage_flags);

	return count;
}

/*
 * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
 * __GFP_REPEAT is too aggressive; it's never worth swapping tons of
 * memory just to allocate one more hugepage.
 */
static ssize_t defrag_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	return double_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
			    struct kobj_attribute *attr,
			    const char *buf, size_t count)
{
	return double_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
				 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
	__ATTR(defrag, 0644, defrag_show, defrag_store);

static ssize_t use_zero_page_show(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static ssize_t use_zero_page_store(struct kobject *kobj,
		struct kobj_attribute *attr, const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
}
static struct kobj_attribute use_zero_page_attr =
	__ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
	__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */

static struct attribute *hugepage_attr[] = {
	&enabled_attr.attr,
	&defrag_attr.attr,
	&use_zero_page_attr.attr,
#ifdef CONFIG_DEBUG_VM
	&debug_cow_attr.attr,
#endif
	NULL,
};

static struct attribute_group hugepage_attr_group = {
	.attrs = hugepage_attr,
};
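
/*
 * The attributes above are exported under
 * /sys/kernel/mm/transparent_hugepage/ (the kobject is created against
 * mm_kobj in hugepage_init_sysfs() below), e.g.:
 *
 *	echo madvise > /sys/kernel/mm/transparent_hugepage/enabled
 *	cat /sys/kernel/mm/transparent_hugepage/use_zero_page
 */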

static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}
static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}
static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	int err;
	unsigned long pages;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);

static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_flag_store(kobj, attr, buf, count,
				 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);

/*
 * max_ptes_none controls whether khugepaged should collapse hugepages over
 * any unmapped ptes, in turn potentially increasing the memory footprint of
 * the vmas. When max_ptes_none is 0, khugepaged will not reduce the amount
 * of free memory in the system as it runs. Increasing max_ptes_none will
 * instead potentially reduce the free memory in the system during the
 * khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR-1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	NULL,
};

static struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
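
/*
 * Because this group has a .name, its attributes land in the
 * /sys/kernel/mm/transparent_hugepage/khugepaged/ subdirectory, e.g.:
 *
 *	echo 100 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 */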

static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	int err;

	*hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
	if (unlikely(!*hugepage_kobj)) {
		pr_err("failed to create transparent hugepage kobject\n");
		return -ENOMEM;
	}

	err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto delete_obj;
	}

	err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
	if (err) {
		pr_err("failed to register transparent hugepage group\n");
		goto remove_hp_group;
	}

	return 0;

remove_hp_group:
	sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
delete_obj:
	kobject_put(*hugepage_kobj);
	return err;
}

static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
	sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
	sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
	kobject_put(hugepage_kobj);
}
#else
static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
{
	return 0;
}

static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
{
}
#endif /* CONFIG_SYSFS */

static int __init hugepage_init(void)
{
	int err;
	struct kobject *hugepage_kobj;

	if (!has_transparent_hugepage()) {
		transparent_hugepage_flags = 0;
		return -EINVAL;
	}

	err = hugepage_init_sysfs(&hugepage_kobj);
	if (err)
		return err;

	err = khugepaged_slab_init();
	if (err)
		goto out;

	register_shrinker(&huge_zero_page_shrinker);

	/*
	 * By default, disable transparent hugepages on smaller systems,
	 * where the extra memory used could hurt more than TLB overhead
	 * is likely to save. The admin can still enable it through /sys.
	 */
	if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
		transparent_hugepage_flags = 0;

	start_khugepaged();

	return 0;
out:
	hugepage_exit_sysfs(hugepage_kobj);
	return err;
}
subsys_initcall(hugepage_init);

static int __init setup_transparent_hugepage(char *str)
{
	int ret = 0;
	if (!str)
		goto out;
	if (!strcmp(str, "always")) {
		set_bit(TRANSPARENT_HUGEPAGE_FLAG,
			&transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "madvise")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			&transparent_hugepage_flags);
		ret = 1;
	} else if (!strcmp(str, "never")) {
		clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
			  &transparent_hugepage_flags);
		clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
			  &transparent_hugepage_flags);
		ret = 1;
	}
out:
	if (!ret)
		pr_warn("transparent_hugepage= cannot parse, ignored\n");
	return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
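
/*
 * This is the early-boot equivalent of the sysfs "enabled" knob: pass
 *
 *	transparent_hugepage=always
 *	transparent_hugepage=madvise
 *	transparent_hugepage=never
 *
 * on the kernel command line to choose the initial policy.
 */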

pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
	if (likely(vma->vm_flags & VM_WRITE))
		pmd = pmd_mkwrite(pmd);
	return pmd;
}

static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
{
	pmd_t entry;
	entry = mk_pmd(page, prot);
	entry = pmd_mkhuge(entry);
	return entry;
}

static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long haddr, pmd_t *pmd,
					struct page *page)
{
	struct mem_cgroup *memcg;
	pgtable_t pgtable;
	spinlock_t *ptl;

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge(page, mm, GFP_TRANSHUGE, &memcg))
		return VM_FAULT_OOM;

	pgtable = pte_alloc_one(mm, haddr);
	if (unlikely(!pgtable)) {
		mem_cgroup_cancel_charge(page, memcg);
		return VM_FAULT_OOM;
	}

	clear_huge_page(page, haddr, HPAGE_PMD_NR);
	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * clear_huge_page writes become visible before the set_pmd_at()
	 * write.
	 */
	__SetPageUptodate(page);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_none(*pmd))) {
		spin_unlock(ptl);
		mem_cgroup_cancel_charge(page, memcg);
		put_page(page);
		pte_free(mm, pgtable);
	} else {
		pmd_t entry;
		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr);
		mem_cgroup_commit_charge(page, memcg, false);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(mm, pmd, pgtable);
		set_pmd_at(mm, haddr, pmd, entry);
		add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
		atomic_long_inc(&mm->nr_ptes);
		spin_unlock(ptl);
	}

	return 0;
}
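
/*
 * Note the pgtable_trans_huge_deposit() above: a preallocated page table
 * is stashed alongside the huge pmd so that a later split or zap can
 * withdraw it and repopulate the pmd with ptes (see
 * pgtable_trans_huge_withdraw() in zap_huge_pmd() and
 * do_huge_pmd_wp_page_fallback() below) without having to allocate
 * memory at a point where allocation is not allowed to fail.
 */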

static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
	return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}

/* Caller must hold page table lock. */
static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
		struct page *zero_page)
{
	pmd_t entry;
	if (!pmd_none(*pmd))
		return false;
	entry = mk_pmd(zero_page, vma->vm_page_prot);
	entry = pmd_mkhuge(entry);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, haddr, pmd, entry);
	atomic_long_inc(&mm->nr_ptes);
	return true;
}

int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
			       unsigned long address, pmd_t *pmd,
			       unsigned int flags)
{
	gfp_t gfp;
	struct page *page;
	unsigned long haddr = address & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return VM_FAULT_FALLBACK;
	if (unlikely(anon_vma_prepare(vma)))
		return VM_FAULT_OOM;
	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
		return VM_FAULT_OOM;
	if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
			transparent_hugepage_use_zero_page()) {
		spinlock_t *ptl;
		pgtable_t pgtable;
		struct page *zero_page;
		bool set;
		pgtable = pte_alloc_one(mm, haddr);
		if (unlikely(!pgtable))
			return VM_FAULT_OOM;
		zero_page = get_huge_zero_page();
		if (unlikely(!zero_page)) {
			pte_free(mm, pgtable);
			count_vm_event(THP_FAULT_FALLBACK);
			return VM_FAULT_FALLBACK;
		}
		ptl = pmd_lock(mm, pmd);
		set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
				zero_page);
		spin_unlock(ptl);
		if (!set) {
			pte_free(mm, pgtable);
			put_huge_zero_page();
		}
		return 0;
	}
	gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
	page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	if (unlikely(!page)) {
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		return VM_FAULT_FALLBACK;
	}
	count_vm_event(THP_FAULT_ALLOC);
	return 0;
}
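
/*
 * Fault-path summary: a read fault on an anonymous huge-pmd region maps
 * the shared huge zero page read-only (when use_zero_page is enabled),
 * deferring the HPAGE_PMD_SIZE allocation until the first write; a write
 * fault allocates and clears a real huge page, and any allocation failure
 * is reported as VM_FAULT_FALLBACK so the caller retries with ordinary
 * 4k pages.
 */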

int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *vma)
{
	spinlock_t *dst_ptl, *src_ptl;
	struct page *src_page;
	pmd_t pmd;
	pgtable_t pgtable;
	int ret;

	ret = -ENOMEM;
	pgtable = pte_alloc_one(dst_mm, addr);
	if (unlikely(!pgtable))
		goto out;

	dst_ptl = pmd_lock(dst_mm, dst_pmd);
	src_ptl = pmd_lockptr(src_mm, src_pmd);
	spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	ret = -EAGAIN;
	pmd = *src_pmd;
	if (unlikely(!pmd_trans_huge(pmd))) {
		pte_free(dst_mm, pgtable);
		goto out_unlock;
	}
	/*
	 * While the page table lock is held, the huge zero pmd should not
	 * be under splitting, since we don't split the page itself, only
	 * the pmd into a page table.
	 */
	if (is_huge_zero_pmd(pmd)) {
		struct page *zero_page;
		bool set;
		/*
		 * get_huge_zero_page() will never allocate a new page here,
		 * since we already have a zero page to copy. It just takes a
		 * reference.
		 */
		zero_page = get_huge_zero_page();
		set = set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
				zero_page);
		BUG_ON(!set); /* unexpected !pmd_none(dst_pmd) */
		ret = 0;
		goto out_unlock;
	}

	if (unlikely(pmd_trans_splitting(pmd))) {
		/* split huge page running from under us */
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
		pte_free(dst_mm, pgtable);

		wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
		goto out;
	}
	src_page = pmd_page(pmd);
	VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
	get_page(src_page);
	page_dup_rmap(src_page);
	add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);

	pmdp_set_wrprotect(src_mm, addr, src_pmd);
	pmd = pmd_mkold(pmd_wrprotect(pmd));
	pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
	set_pmd_at(dst_mm, addr, dst_pmd, pmd);
	atomic_long_inc(&dst_mm->nr_ptes);

	ret = 0;
out_unlock:
	spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
out:
	return ret;
}
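
/*
 * This is classic fork-time copy-on-write at pmd granularity: the source
 * pmd is write-protected with pmdp_set_wrprotect() and the child receives
 * a write-protected, old copy of it, so the first write in either mm
 * faults and is resolved in do_huge_pmd_wp_page() below.
 */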

void huge_pmd_set_accessed(struct mm_struct *mm,
			   struct vm_area_struct *vma,
			   unsigned long address,
			   pmd_t *pmd, pmd_t orig_pmd,
			   int dirty)
{
	spinlock_t *ptl;
	pmd_t entry;
	unsigned long haddr;

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto unlock;

	entry = pmd_mkyoung(orig_pmd);
	haddr = address & HPAGE_PMD_MASK;
	if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
		update_mmu_cache_pmd(vma, address, pmd);

unlock:
	spin_unlock(ptl);
}

/*
 * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
 * during copy_user_huge_page()'s copy_page_rep(): in the case when
 * the source page gets split and a tail page is freed before the copy
 * completes. Called under the pmd_lock of the checked pmd, so this is
 * itself safe from splitting.
 */
static void get_user_huge_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
		struct page *endpage = page + HPAGE_PMD_NR;

		atomic_add(HPAGE_PMD_NR, &page->_count);
		while (++page < endpage)
			get_huge_page_tail(page);
	} else {
		get_page(page);
	}
}

static void put_user_huge_page(struct page *page)
{
	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
		struct page *endpage = page + HPAGE_PMD_NR;

		while (page < endpage)
			put_page(page++);
	} else {
		put_page(page);
	}
}

static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmd, pmd_t orig_pmd,
					struct page *page,
					unsigned long haddr)
{
	struct mem_cgroup *memcg;
	spinlock_t *ptl;
	pgtable_t pgtable;
	pmd_t _pmd;
	int ret = 0, i;
	struct page **pages;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
			GFP_KERNEL);
	if (unlikely(!pages)) {
		ret |= VM_FAULT_OOM;
		goto out;
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
					       __GFP_OTHER_NODE,
					       vma, address, page_to_nid(page));
		if (unlikely(!pages[i] ||
			     mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
						   &memcg))) {
			if (pages[i])
				put_page(pages[i]);
			while (--i >= 0) {
				memcg = (void *)page_private(pages[i]);
				set_page_private(pages[i], 0);
				mem_cgroup_cancel_charge(pages[i], memcg);
				put_page(pages[i]);
			}
			kfree(pages);
			ret |= VM_FAULT_OOM;
			goto out;
		}
		set_page_private(pages[i], (unsigned long)memcg);
	}

	for (i = 0; i < HPAGE_PMD_NR; i++) {
		copy_user_highpage(pages[i], page + i,
				   haddr + PAGE_SIZE * i, vma);
		__SetPageUptodate(pages[i]);
		cond_resched();
	}

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_free_pages;
	VM_BUG_ON_PAGE(!PageHead(page), page);

	pmdp_clear_flush_notify(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = mk_pte(pages[i], vma->vm_page_prot);
		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		page_add_new_anon_rmap(pages[i], vma, haddr);
		mem_cgroup_commit_charge(pages[i], memcg, false);
		lru_cache_add_active_or_unevictable(pages[i], vma);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	kfree(pages);

	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	page_remove_rmap(page);
	spin_unlock(ptl);

	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	ret |= VM_FAULT_WRITE;
	put_page(page);

out:
	return ret;

out_free_pages:
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		memcg = (void *)page_private(pages[i]);
		set_page_private(pages[i], 0);
		mem_cgroup_cancel_charge(pages[i], memcg);
		put_page(pages[i]);
	}
	kfree(pages);
	goto out;
}
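
/*
 * The fallback above services a write fault on a shared huge page when no
 * new huge page can be allocated: the data is copied into HPAGE_PMD_NR
 * individual 4k pages, and the huge pmd is then replaced with a page table
 * built from the pgtable deposited when the huge pmd was installed. Note
 * that page_private() is borrowed as scratch space to remember each page's
 * memcg between the charge and the commit/cancel steps.
 */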

int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
	spinlock_t *ptl;
	int ret = 0;
	struct page *page = NULL, *new_page;
	struct mem_cgroup *memcg;
	unsigned long haddr;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	ptl = pmd_lockptr(mm, pmd);
	VM_BUG_ON_VMA(!vma->anon_vma, vma);
	haddr = address & HPAGE_PMD_MASK;
	if (is_huge_zero_pmd(orig_pmd))
		goto alloc;
	spin_lock(ptl);
	if (unlikely(!pmd_same(*pmd, orig_pmd)))
		goto out_unlock;

	page = pmd_page(orig_pmd);
	VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
	if (page_mapcount(page) == 1) {
		pmd_t entry;
		entry = pmd_mkyoung(orig_pmd);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
			update_mmu_cache_pmd(vma, address, pmd);
		ret |= VM_FAULT_WRITE;
		goto out_unlock;
	}
	get_user_huge_page(page);
	spin_unlock(ptl);
alloc:
	if (transparent_hugepage_enabled(vma) &&
	    !transparent_hugepage_debug_cow()) {
		gfp_t gfp;

		gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
		new_page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
	} else
		new_page = NULL;

	if (unlikely(!new_page)) {
		if (!page) {
			split_huge_page_pmd(vma, address, pmd);
			ret |= VM_FAULT_FALLBACK;
		} else {
			ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
					pmd, orig_pmd, page, haddr);
			if (ret & VM_FAULT_OOM) {
				split_huge_page(page);
				ret |= VM_FAULT_FALLBACK;
			}
			put_user_huge_page(page);
		}
		count_vm_event(THP_FAULT_FALLBACK);
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm,
					   GFP_TRANSHUGE, &memcg))) {
		put_page(new_page);
		if (page) {
			split_huge_page(page);
			put_user_huge_page(page);
		} else
			split_huge_page_pmd(vma, address, pmd);
		ret |= VM_FAULT_FALLBACK;
		count_vm_event(THP_FAULT_FALLBACK);
		goto out;
	}

	count_vm_event(THP_FAULT_ALLOC);

	if (!page)
		clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
	else
		copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	__SetPageUptodate(new_page);

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	spin_lock(ptl);
	if (page)
		put_user_huge_page(page);
	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
		spin_unlock(ptl);
		mem_cgroup_cancel_charge(new_page, memcg);
		put_page(new_page);
		goto out_mn;
	} else {
		pmd_t entry;
		entry = mk_huge_pmd(new_page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		pmdp_clear_flush_notify(vma, haddr, pmd);
		page_add_new_anon_rmap(new_page, vma, haddr);
		mem_cgroup_commit_charge(new_page, memcg, false);
		lru_cache_add_active_or_unevictable(new_page, vma);
		set_pmd_at(mm, haddr, pmd, entry);
		update_mmu_cache_pmd(vma, address, pmd);
		if (!page) {
			add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
			put_huge_zero_page();
		} else {
			VM_BUG_ON_PAGE(!PageHead(page), page);
			page_remove_rmap(page);
			put_page(page);
		}
		ret |= VM_FAULT_WRITE;
	}
	spin_unlock(ptl);
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return ret;
out_unlock:
	spin_unlock(ptl);
	return ret;
}

struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr,
				   pmd_t *pmd,
				   unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page = NULL;

	assert_spin_locked(pmd_lockptr(mm, pmd));

	if (flags & FOLL_WRITE && !pmd_write(*pmd))
		goto out;

	/* Avoid dumping huge zero page */
	if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
		return ERR_PTR(-EFAULT);

	/* Full NUMA hinting faults to serialise migration in fault paths */
	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
		goto out;

	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!PageHead(page), page);
	if (flags & FOLL_TOUCH) {
		pmd_t _pmd;
		/*
		 * We should set the dirty bit only for FOLL_WRITE, but
		 * for now the dirty bit in the pmd is meaningless.
		 * And if the dirty bit ever becomes meaningful and
		 * we only set it with FOLL_WRITE, an atomic
		 * set_bit will be required on the pmd to set the
		 * young bit, instead of the current set_pmd_at.
		 */
		_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
		if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
					  pmd, _pmd, 1))
			update_mmu_cache_pmd(vma, addr, pmd);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();
			if (page->mapping)
				mlock_vma_page(page);
			unlock_page(page);
		}
	}
	page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
	VM_BUG_ON_PAGE(!PageCompound(page), page);
	if (flags & FOLL_GET)
		get_page_foll(page);

out:
	return page;
}

/* NUMA hinting page fault entry point for trans huge pmds */
int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
				unsigned long addr, pmd_t pmd, pmd_t *pmdp)
{
	spinlock_t *ptl;
	struct anon_vma *anon_vma = NULL;
	struct page *page;
	unsigned long haddr = addr & HPAGE_PMD_MASK;
	int page_nid = -1, this_nid = numa_node_id();
	int target_nid, last_cpupid = -1;
	bool page_locked;
	bool migrated = false;
	int flags = 0;

	/* A PROT_NONE fault should not end up here */
	BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));

	ptl = pmd_lock(mm, pmdp);
	if (unlikely(!pmd_same(pmd, *pmdp)))
		goto out_unlock;

	/*
	 * If there are potential migrations, wait for completion and retry
	 * without disrupting NUMA hinting information. Do not relock and
	 * check_same as the page may no longer be mapped.
	 */
	if (unlikely(pmd_trans_migrating(*pmdp))) {
		page = pmd_page(*pmdp);
		spin_unlock(ptl);
		wait_on_page_locked(page);
		goto out;
	}

	page = pmd_page(pmd);
	BUG_ON(is_huge_zero_page(page));
	page_nid = page_to_nid(page);
	last_cpupid = page_cpupid_last(page);
	count_vm_numa_event(NUMA_HINT_FAULTS);
	if (page_nid == this_nid) {
		count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
		flags |= TNF_FAULT_LOCAL;
	}

	/*
	 * Avoid grouping on DSO/COW pages in specific and RO pages
	 * in general, RO pages shouldn't hurt as much anyway since
	 * they can be in shared cache state.
	 *
	 * FIXME! This checks "pmd_dirty()" as an approximation of
	 * "is this a read-only page", since checking "pmd_write()"
	 * is even more broken. We haven't actually turned this into
	 * a writable page, so pmd_write() will always be false.
	 */
	if (!pmd_dirty(pmd))
		flags |= TNF_NO_GROUP;

	/*
	 * Acquire the page lock to serialise THP migrations but avoid
	 * dropping page_table_lock if at all possible.
	 */
	page_locked = trylock_page(page);
	target_nid = mpol_misplaced(page, vma, haddr);
	if (target_nid == -1) {
		/* If the page was locked, there are no parallel migrations */
		if (page_locked)
			goto clear_pmdnuma;
	}

	/* Migration could have started since the pmd_trans_migrating check */
	if (!page_locked) {
		spin_unlock(ptl);
		wait_on_page_locked(page);
		page_nid = -1;
		goto out;
	}

	/*
	 * Page is misplaced. The page lock serialises migrations. Acquire
	 * the anon_vma to serialise splits.
	 */
	get_page(page);
	spin_unlock(ptl);
	anon_vma = page_lock_anon_vma_read(page);

	/* Confirm the PMD did not change while page_table_lock was released */
	spin_lock(ptl);
	if (unlikely(!pmd_same(pmd, *pmdp))) {
		unlock_page(page);
		put_page(page);
		page_nid = -1;
		goto out_unlock;
	}

	/* Bail if we fail to protect against THP splits for any reason */
	if (unlikely(!anon_vma)) {
		put_page(page);
		page_nid = -1;
		goto clear_pmdnuma;
	}

	/*
	 * Migrate the THP to the requested node; this returns with the page
	 * unlocked and access rights restored.
	 */
	spin_unlock(ptl);
	migrated = migrate_misplaced_transhuge_page(mm, vma,
				pmdp, pmd, addr, page, target_nid);
	if (migrated) {
		flags |= TNF_MIGRATED;
		page_nid = target_nid;
	}

	goto out;
clear_pmdnuma:
	BUG_ON(!PageLocked(page));
	pmd = pmd_modify(pmd, vma->vm_page_prot);
	set_pmd_at(mm, haddr, pmdp, pmd);
	update_mmu_cache_pmd(vma, addr, pmdp);
	unlock_page(page);
out_unlock:
	spin_unlock(ptl);

out:
	if (anon_vma)
		page_unlock_anon_vma_read(anon_vma);

	if (page_nid != -1)
		task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);

	return 0;
}

int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		 pmd_t *pmd, unsigned long addr)
{
	spinlock_t *ptl;
	int ret = 0;

	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		struct page *page;
		pgtable_t pgtable;
		pmd_t orig_pmd;
		/*
		 * For architectures like ppc64 we look at the deposited
		 * pgtable when calling pmdp_get_and_clear. So do the
		 * pgtable_trans_huge_withdraw after finishing pmdp related
		 * operations.
		 */
		orig_pmd = pmdp_get_and_clear_full(tlb->mm, addr, pmd,
						   tlb->fullmm);
		tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
		pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
		if (is_huge_zero_pmd(orig_pmd)) {
			atomic_long_dec(&tlb->mm->nr_ptes);
			spin_unlock(ptl);
			put_huge_zero_page();
		} else {
			page = pmd_page(orig_pmd);
			page_remove_rmap(page);
			VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
			add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
			VM_BUG_ON_PAGE(!PageHead(page), page);
			atomic_long_dec(&tlb->mm->nr_ptes);
			spin_unlock(ptl);
			tlb_remove_page(tlb, page);
		}
		pte_free(tlb->mm, pgtable);
		ret = 1;
	}
	return ret;
}

int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
		  unsigned long old_addr,
		  unsigned long new_addr, unsigned long old_end,
		  pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	int ret = 0;
	pmd_t pmd;

	struct mm_struct *mm = vma->vm_mm;

	if ((old_addr & ~HPAGE_PMD_MASK) ||
	    (new_addr & ~HPAGE_PMD_MASK) ||
	    old_end - old_addr < HPAGE_PMD_SIZE ||
	    (new_vma->vm_flags & VM_NOHUGEPAGE))
		goto out;

	/*
	 * The destination pmd shouldn't be established; free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd))) {
		VM_BUG_ON(pmd_trans_huge(*new_pmd));
		goto out;
	}

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_sem prevents deadlock.
	 */
	ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
	if (ret == 1) {
		new_ptl = pmd_lockptr(mm, new_pmd);
		if (new_ptl != old_ptl)
			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
		pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
		VM_BUG_ON(!pmd_none(*new_pmd));

		if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
			pgtable_t pgtable;
			pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
			pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
		}
		set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
		if (new_ptl != old_ptl)
			spin_unlock(new_ptl);
		spin_unlock(old_ptl);
	}
out:
	return ret;
}
/*
 * Returns
 *  - 0 if PMD could not be locked
 *  - 1 if PMD was locked but protections were unchanged and TLB flush unnecessary
 *  - HPAGE_PMD_NR if protections were changed and TLB flush is necessary
 */
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, pgprot_t newprot, int prot_numa)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	int ret = 0;

	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
		pmd_t entry;
		ret = 1;

		/*
		 * Avoid trapping faults against the zero page. The read-only
		 * data is likely to be read-cached on the local CPU and
		 * local/remote hits to the zero page are not interesting.
		 */
		if (prot_numa && is_huge_zero_pmd(*pmd)) {
			spin_unlock(ptl);
			return ret;
		}

		if (!prot_numa || !pmd_protnone(*pmd)) {
			entry = pmdp_get_and_clear_notify(mm, addr, pmd);
			entry = pmd_modify(entry, newprot);
			ret = HPAGE_PMD_NR;
			set_pmd_at(mm, addr, pmd, entry);
			BUG_ON(pmd_write(entry));
		}
		spin_unlock(ptl);
	}

	return ret;
}
/*
 * Returns 1 if a given pmd maps a stable (not under splitting) thp.
 * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
 *
 * Note that if it returns 1, this routine returns without unlocking page
 * table locks. So callers must unlock them.
 */
int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
		spinlock_t **ptl)
{
	*ptl = pmd_lock(vma->vm_mm, pmd);
	if (likely(pmd_trans_huge(*pmd))) {
		if (unlikely(pmd_trans_splitting(*pmd))) {
			spin_unlock(*ptl);
			wait_split_huge_page(vma->anon_vma, pmd);
			return -1;
		} else {
			/* Thp mapped by 'pmd' is stable, so we can
			 * handle it as it is. */
			return 1;
		}
	}
	spin_unlock(*ptl);
	return 0;
}
/*
 * This function returns whether a given @page is mapped onto the @address
 * in the virtual space of @mm.
 *
 * When it's true, this function returns the pmd while holding the page
 * table lock, which is passed back to the caller via @ptl.
 * When it's false, it returns NULL without holding the page table lock.
 */
pmd_t *page_check_address_pmd(struct page *page,
			      struct mm_struct *mm,
			      unsigned long address,
			      enum page_check_address_pmd_flag flag,
			      spinlock_t **ptl)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (address & ~HPAGE_PMD_MASK)
		return NULL;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);

	*ptl = pmd_lock(mm, pmd);
	if (!pmd_present(*pmd))
		goto unlock;
	if (pmd_page(*pmd) != page)
		goto unlock;
	/*
	 * split_vma() may create temporary aliased mappings. There is
	 * no risk as long as all huge pmd are found and have their
	 * splitting bit set before __split_huge_page_refcount
	 * runs. Finding the same huge pmd more than once during the
	 * same rmap walk is not a problem.
	 */
	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
	    pmd_trans_splitting(*pmd))
		goto unlock;
	if (pmd_trans_huge(*pmd)) {
		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
			  !pmd_trans_splitting(*pmd));
		return pmd;
	}
unlock:
	spin_unlock(*ptl);
	return NULL;
}
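/*
 * First step of split_huge_page(): mark the huge pmd that maps @page in
 * @vma as splitting (with a TLB flush via pmdp_splitting_flush()), so
 * that gup_fast() can no longer take new references through it.
 * Returns 1 if a huge pmd was marked, 0 otherwise.
 */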
static int __split_huge_page_splitting(struct page *page,
				       struct vm_area_struct *vma,
				       unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pmd_t *pmd;
	int ret = 0;
	/* For mmu_notifiers */
	const unsigned long mmun_start = address;
	const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	pmd = page_check_address_pmd(page, mm, address,
			PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
	if (pmd) {
		/*
		 * We can't temporarily set the pmd to null in order
		 * to split it, the pmd must remain marked huge at all
		 * times or the VM won't take the pmd_trans_huge paths
		 * and it won't wait on the anon_vma->root->rwsem to
		 * serialize against split_huge_page*.
		 */
		pmdp_splitting_flush(vma, address, pmd);
		ret = 1;
		spin_unlock(ptl);
	}
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	return ret;
}
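/*
 * Second step of split_huge_page(): under compound_lock and
 * zone->lru_lock, turn the compound page into HPAGE_PMD_NR independent
 * pages, distributing the head's mapcount and pins to the tail pages and
 * placing the tails on the LRU (or on @list).
 */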
static void __split_huge_page_refcount(struct page *page,
				       struct list_head *list)
{
	int i;
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;
	int tail_count = 0;

	/* prevent PageLRU from going away from under us, and freeze lru stats */
	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);

	compound_lock(page);
	/* complete memcg works before add pages to LRU */
	mem_cgroup_split_huge_fixup(page);

	for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
		struct page *page_tail = page + i;

		/* tail_page->_mapcount cannot change */
		BUG_ON(page_mapcount(page_tail) < 0);
		tail_count += page_mapcount(page_tail);
		/* check for overflow */
		BUG_ON(tail_count < 0);
		BUG_ON(atomic_read(&page_tail->_count) != 0);
		/*
		 * tail_page->_count is zero and not changing from
		 * under us. But get_page_unless_zero() may be running
		 * from under us on the tail_page. If we used
		 * atomic_set() below instead of atomic_add(), we
		 * would then run atomic_set() concurrently with
		 * get_page_unless_zero(), and atomic_set() is
		 * implemented in C not using locked ops. spin_unlock
		 * on x86 sometimes uses locked ops because of PPro
		 * errata 66, 92, so unless somebody can guarantee
		 * atomic_set() here would be safe on all archs (and
		 * not only on x86), it's safer to use atomic_add().
		 */
		atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
			   &page_tail->_count);

		/* after clearing PageTail the gup refcount can be released */
		smp_mb__after_atomic();

		/*
		 * retain the hwpoison flag of a poisoned tail page;
		 * fixes the wrong process being killed in a KVM guest
		 * by memory-failure.
		 */
		page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
		page_tail->flags |= (page->flags &
				     ((1L << PG_referenced) |
				      (1L << PG_swapbacked) |
				      (1L << PG_mlocked) |
				      (1L << PG_uptodate) |
				      (1L << PG_active) |
				      (1L << PG_unevictable)));
		page_tail->flags |= (1L << PG_dirty);

		/* clear PageTail before overwriting first_page */
		smp_wmb();

		/*
		 * __split_huge_page_splitting() already set the
		 * splitting bit in all pmd that could map this
		 * hugepage, that will ensure no CPU can alter the
		 * mapcount on the head page. The mapcount is only
		 * accounted in the head page and it has to be
		 * transferred to all tail pages in the below code. So
		 * for this code to be safe, during the split the
		 * mapcount can't change. But that doesn't mean userland
		 * can't keep changing and reading the page contents while
		 * we transfer the mapcount, so the pmd splitting
		 * status is achieved setting a reserved bit in the
		 * pmd, not by clearing the present bit.
		 */
		page_tail->_mapcount = page->_mapcount;

		BUG_ON(page_tail->mapping);
		page_tail->mapping = page->mapping;

		page_tail->index = page->index + i;
		page_cpupid_xchg_last(page_tail, page_cpupid_last(page));

		BUG_ON(!PageAnon(page_tail));
		BUG_ON(!PageUptodate(page_tail));
		BUG_ON(!PageDirty(page_tail));
		BUG_ON(!PageSwapBacked(page_tail));

		lru_add_page_tail(page, page_tail, lruvec, list);
	}
	atomic_sub(tail_count, &page->_count);
	BUG_ON(atomic_read(&page->_count) <= 0);

	__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);

	ClearPageCompound(page);
	compound_unlock(page);
	spin_unlock_irq(&zone->lru_lock);

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		struct page *page_tail = page + i;
		BUG_ON(page_count(page_tail) <= 0);
		/*
		 * Tail pages may be freed if there wasn't any mapping,
		 * like if add_to_swap() is running on a lru page that
		 * had its mapping zapped. And freeing these pages
		 * requires taking the lru_lock so we do the put_page
		 * of the tail pages after the split is complete.
		 */
		put_page(page_tail);
	}

	/*
	 * Only the head page (which has now become a regular page) is
	 * required to be pinned by the caller.
	 */
	BUG_ON(page_count(page) <= 0);
}
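/*
 * Last step of split_huge_page() for one mapping: replace the splitting
 * huge pmd with a page table (built from the deposited pgtable) whose
 * ptes map the now-independent small pages.  Returns 1 if a pmd was
 * rewritten.
 */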
static int __split_huge_page_map(struct page *page,
				 struct vm_area_struct *vma,
				 unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pmd_t *pmd, _pmd;
	int ret = 0, i;
	pgtable_t pgtable;
	unsigned long haddr;

	pmd = page_check_address_pmd(page, mm, address,
			PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
	if (pmd) {
		pgtable = pgtable_trans_huge_withdraw(mm, pmd);
		pmd_populate(mm, &_pmd, pgtable);
		if (pmd_write(*pmd))
			BUG_ON(page_mapcount(page) != 1);

		haddr = address;
		for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
			pte_t *pte, entry;
			BUG_ON(PageCompound(page+i));
			/*
			 * Note that NUMA hinting access restrictions are not
			 * transferred to avoid any possibility of altering
			 * permissions across VMAs.
			 */
			entry = mk_pte(page + i, vma->vm_page_prot);
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
			if (!pmd_write(*pmd))
				entry = pte_wrprotect(entry);
			if (!pmd_young(*pmd))
				entry = pte_mkold(entry);
			pte = pte_offset_map(&_pmd, haddr);
			BUG_ON(!pte_none(*pte));
			set_pte_at(mm, haddr, pte, entry);
			pte_unmap(pte);
		}

		smp_wmb(); /* make pte visible before pmd */
		/*
		 * Up to this point the pmd is present and huge and
		 * userland has full access to the hugepage
		 * during the split (which happens in place). If we
		 * overwrite the pmd with the not-huge version
		 * pointing to the pte here (which of course we could
		 * if all CPUs were bug free), userland could trigger
		 * a small page size TLB miss on the small sized TLB
		 * while the hugepage TLB entry is still established
		 * in the huge TLB. Some CPUs don't like that. See
		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
		 * Erratum 383 on page 93. Intel should be safe but
		 * also warns that it's only safe if the permission
		 * and cache attributes of the two entries loaded in
		 * the two TLBs are identical (which should be the case
		 * here). But it is generally safer to never allow
		 * small and huge TLB entries for the same virtual
		 * address to be loaded simultaneously. So instead of
		 * doing "pmd_populate(); flush_tlb_range();" we first
		 * mark the current pmd notpresent (atomically because
		 * here the pmd_trans_huge and pmd_trans_splitting
		 * must remain set at all times on the pmd until the
		 * split is complete for this pmd), then we flush the
		 * SMP TLB and finally we write the non-huge version
		 * of the pmd entry with pmd_populate.
		 */
		pmdp_invalidate(vma, address, pmd);
		pmd_populate(mm, pmd, pgtable);
		ret = 1;
		spin_unlock(ptl);
	}

	return ret;
}
/* must be called with anon_vma->root->rwsem held */
static void __split_huge_page(struct page *page,
			      struct anon_vma *anon_vma,
			      struct list_head *list)
{
	int mapcount, mapcount2;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct anon_vma_chain *avc;

	BUG_ON(!PageHead(page));
	BUG_ON(PageTail(page));

	mapcount = 0;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		mapcount += __split_huge_page_splitting(page, vma, addr);
	}
	/*
	 * It is critical that new vmas are added to the tail of the
	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
	 * and establishes a child pmd before
	 * __split_huge_page_splitting() freezes the parent pmd (so if
	 * we fail to prevent copy_huge_pmd() from running until the
	 * whole __split_huge_page() is complete), we will still see
	 * the newly established pmd of the child later during the
	 * walk, to be able to set it as pmd_trans_splitting too.
	 */
	if (mapcount != page_mapcount(page)) {
		pr_err("mapcount %d page_mapcount %d\n",
		       mapcount, page_mapcount(page));
		BUG();
	}

	__split_huge_page_refcount(page, list);

	mapcount2 = 0;
	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long addr = vma_address(page, vma);
		BUG_ON(is_vma_temporary_stack(vma));
		mapcount2 += __split_huge_page_map(page, vma, addr);
	}
	if (mapcount != mapcount2) {
		pr_err("mapcount %d mapcount2 %d page_mapcount %d\n",
		       mapcount, mapcount2, page_mapcount(page));
		BUG();
	}
}
/*
 * Split a hugepage into normal pages. This doesn't change the position of
 * the head page. If @list is null, tail pages will be added to the LRU
 * list, otherwise to @list. Both head page and tail pages will inherit
 * mapping, flags, and so on from the hugepage.
 * Returns 0 if the hugepage was split successfully, otherwise 1.
 */
int split_huge_page_to_list(struct page *page, struct list_head *list)
{
	struct anon_vma *anon_vma;
	int ret = 1;

	BUG_ON(is_huge_zero_page(page));
	BUG_ON(!PageAnon(page));

	/*
	 * The caller does not necessarily hold an mmap_sem that would prevent
	 * the anon_vma disappearing so we first take a reference to it
	 * and then lock the anon_vma for write. This is similar to
	 * page_lock_anon_vma_read except the write lock is taken to serialise
	 * against parallel split or collapse operations.
	 */
	anon_vma = page_get_anon_vma(page);
	if (!anon_vma)
		goto out;
	anon_vma_lock_write(anon_vma);

	ret = 0;
	if (!PageCompound(page))
		goto out_unlock;

	BUG_ON(!PageSwapBacked(page));
	__split_huge_page(page, anon_vma, list);
	count_vm_event(THP_SPLIT);

	BUG_ON(PageCompound(page));
out_unlock:
	anon_vma_unlock_write(anon_vma);
	put_anon_vma(anon_vma);
out:
	return ret;
}
#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
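/*
 * Handle MADV_HUGEPAGE/MADV_NOHUGEPAGE for madvise(): flip the
 * VM_HUGEPAGE/VM_NOHUGEPAGE flags on the vma (refusing mappings that can
 * never be THP-backed, see VM_NO_THP above) and, when enabling, register
 * the mm with khugepaged right away.
 */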
int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we
		 * simply ignore the madvise to prevent qemu from causing a
		 * SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
			return -EINVAL;
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes good for khugepaged to scan,
		 * register it here without waiting for a page fault
		 * that may not happen any time soon.
		 */
		if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		/*
		 * Be somewhat over-protective like KSM for now!
		 */
		if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
			return -EINVAL;
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
static int __init khugepaged_slab_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	return 0;
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}
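/*
 * Register @mm with khugepaged: allocate an mm_slot, add it to the tail
 * of the scan list (pinning the mm with mm_count) and wake khugepaged if
 * the list was previously empty.
 */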
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down a little.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	atomic_inc(&mm->mm_count);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}
int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	if (!vma->anon_vma)
		/*
		 * Not yet faulted in so we will register later in the
		 * page fault if needed.
		 */
		return 0;
	if (vma->vm_ops)
		/* khugepaged not yet working on file or special mappings */
		return 0;
	VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}
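/*
 * Unregister an exiting @mm.  If khugepaged is not currently scanning
 * this mm, the slot is freed and the mm reference dropped right away;
 * otherwise take and release mmap_sem for writing to synchronise with
 * khugepaged before the page tables are torn down.
 */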
void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap sem read mode). Stop here (after we return,
		 * all pagetables will be destroyed) until khugepaged has
		 * finished working on the pagetables under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}
static void release_pte_page(struct page *page)
{
	/* 0 stands for page_is_file_cache(page) == false */
	dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval))
			release_pte_page(pte_page(pteval));
	}
}
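/*
 * Verify and isolate, under the pte lock, the small pages that are about
 * to be collapsed: every present pte must map an anonymous, unpinned,
 * lockable page that can be taken off the LRU, with at most
 * khugepaged_max_ptes_none empty ptes in the range.  Returns 1 if the
 * whole range qualifies (and was seen referenced and writable), 0
 * otherwise, releasing any pages already isolated.
 */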
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page;
	pte_t *_pte;
	int none = 0;
	bool referenced = false, writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else
				goto out;
		}
		if (!pte_present(pteval))
			goto out;
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page))
			goto out;

		VM_BUG_ON_PAGE(PageCompound(page), page);
		VM_BUG_ON_PAGE(!PageAnon(page), page);
		VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page))
			goto out;

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + !!PageSwapCache(page)) {
			unlock_page(page);
			goto out;
		}
		if (pte_write(pteval)) {
			writable = true;
		} else {
			if (PageSwapCache(page) && !reuse_swap_page(page)) {
				unlock_page(page);
				goto out;
			}
			/*
			 * Page is not in the swap cache. It can be collapsed
			 * into a THP.
			 */
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			goto out;
		}
		/* 0 stands for page_is_file_cache(page) == false */
		inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		/* If there is no mapped pte young don't collapse the page */
		if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = true;
	}
	if (likely(referenced && writable))
		return 1;
out:
	release_pte_pages(pte, _pte);
	return 0;
}
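/*
 * Copy the contents of the isolated small pages into @page (the new
 * hugepage) and release them, clearing each pte; empty ptes become
 * zero-filled portions of the hugepage.
 */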
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval)) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}

		address += PAGE_SIZE;
		page++;
	}
}
static void khugepaged_alloc_sleep(void)
{
	wait_event_freezable_timeout(khugepaged_wait, false,
			msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}

static int khugepaged_node_load[MAX_NUMNODES];
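/*
 * With zone_reclaim_mode enabled, avoid collapsing across nodes that are
 * further apart than RECLAIM_DISTANCE: abort the scan if @nid is that far
 * from any node we have already recorded pages on.
 */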
static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If zone_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!zone_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > RECLAIM_DISTANCE)
			return true;
	}
	return false;
}
#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find first node with max normal pages hit */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balancing if several nodes have the same hit count */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
		      struct vm_area_struct *vma, unsigned long address,
		      int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);

	*hpage = alloc_pages_exact_node(node, alloc_hugepage_gfpmask(
		khugepaged_defrag(), __GFP_OTHER_NODE), HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_hugepage(int defrag)
{
	return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
			   HPAGE_PMD_ORDER);
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_hugepage(khugepaged_defrag());
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, struct mm_struct *mm,
		      struct vm_area_struct *vma, unsigned long address,
		      int node)
{
	up_read(&mm->mmap_sem);
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif
static bool hugepage_vma_check(struct vm_area_struct *vma)
{
	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vma->vm_flags & VM_NOHUGEPAGE))
		return false;

	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
	return true;
}
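/*
 * Collapse the pte-mapped range around @address into a freshly allocated
 * hugepage.  Entered with mmap_sem held for read (dropped during the
 * allocation); the vma is revalidated under the write lock before the
 * huge pmd is installed.  Always returns with mmap_sem released.
 */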
static void collapse_huge_page(struct mm_struct *mm,
			       unsigned long address,
			       struct page **hpage,
			       struct vm_area_struct *vma,
			       int node)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated;
	unsigned long hstart, hend;
	struct mem_cgroup *memcg;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* release the mmap_sem read lock. */
	new_page = khugepaged_alloc_page(hpage, mm, vma, address, node);
	if (!new_page)
		return;

	if (unlikely(mem_cgroup_try_charge(new_page, mm,
					   GFP_TRANSHUGE, &memcg)))
		return;

	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		goto out;

	vma = find_vma(mm, address);
	if (!vma)
		goto out;
	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		goto out;
	if (!hugepage_vma_check(vma))
		goto out;
	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	mmun_start = address;
	mmun_end   = address + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_clear_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes to become
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address);
	mem_cgroup_commit_charge(new_page, memcg, false);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
out_up_write:
	up_write(&mm->mmap_sem);
	return;

out:
	mem_cgroup_cancel_charge(new_page, memcg);
	goto out_up_write;
}
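/*
 * Scan the pmd-sized range at @address and decide whether to collapse it:
 * the ptes must map present, anonymous, unpinned LRU pages, at least one
 * of them young and one writable.  The source node of every page is
 * recorded in khugepaged_node_load[].  Returns 1 after calling
 * collapse_huge_page() (which releases mmap_sem), 0 otherwise.
 */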
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none = 0;
	struct page *page;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE;
	bool writable = false, referenced = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd)
		goto out;

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval)) {
			if (++none <= khugepaged_max_ptes_none)
				continue;
			else
				goto out_unmap;
		}
		if (!pte_present(pteval))
			goto out_unmap;
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page))
			goto out_unmap;
		/*
		 * Record which node the original page is from and save this
		 * information in khugepaged_node_load[].
		 * Khugepaged will allocate the hugepage from the node with
		 * the highest hit count.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node))
			goto out_unmap;
		khugepaged_node_load[node]++;
		VM_BUG_ON_PAGE(PageCompound(page), page);
		if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
			goto out_unmap;
		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + !!PageSwapCache(page))
			goto out_unmap;
		if (pte_young(pteval) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced = true;
	}
	if (referenced && writable)
		ret = 1;
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, vma, node);
	}
out:
	return ret;
}
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}
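/*
 * Scan up to @pages pages across the registered mms, resuming from and
 * updating the cursor in khugepaged_scan.  Called and returns with
 * khugepaged_mm_lock held, dropping it while the actual scan runs.
 * Returns the amount of scanning progress made.
 */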
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	down_read(&mm->mmap_sem);
	if (unlikely(khugepaged_test_exit(mm)))
		vma = NULL;
	else
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			ret = khugepaged_scan_pmd(mm, vma,
						  khugepaged_scan.address,
						  hpage);
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}
static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}
static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || freezing(current)))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}
static void khugepaged_wait_work(void)
{
	try_to_freeze();

	if (khugepaged_has_work()) {
		if (!khugepaged_scan_sleep_millisecs)
			return;

		wait_event_freezable_timeout(khugepaged_wait,
					     kthread_should_stop(),
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
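/*
 * Split a pmd that maps the huge zero page: repopulate it from the
 * deposited pgtable with ptes pointing at the small zero page, then drop
 * the huge zero page reference.
 */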
static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
		unsigned long haddr, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t pgtable;
	pmd_t _pmd;
	int i;

	pmdp_clear_flush_notify(vma, haddr, pmd);
	/* leave pmd empty until pte is filled */

	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
	pmd_populate(mm, &_pmd, pgtable);

	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
		pte_t *pte, entry;
		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
		entry = pte_mkspecial(entry);
		pte = pte_offset_map(&_pmd, haddr);
		VM_BUG_ON(!pte_none(*pte));
		set_pte_at(mm, haddr, pte, entry);
		pte_unmap(pte);
	}
	smp_wmb(); /* make pte visible before pmd */
	pmd_populate(mm, pmd, pgtable);
	put_huge_zero_page();
}
void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd)
{
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long haddr = address & HPAGE_PMD_MASK;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);

	mmun_start = haddr;
	mmun_end   = haddr + HPAGE_PMD_SIZE;
again:
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	ptl = pmd_lock(mm, pmd);
	if (unlikely(!pmd_trans_huge(*pmd))) {
		spin_unlock(ptl);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
		return;
	}
	if (is_huge_zero_pmd(*pmd)) {
		__split_huge_zero_page_pmd(vma, haddr, pmd);
		spin_unlock(ptl);
		mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
		return;
	}
	page = pmd_page(*pmd);
	VM_BUG_ON_PAGE(!page_count(page), page);
	get_page(page);
	spin_unlock(ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	split_huge_page(page);

	put_page(page);

	/*
	 * We don't always have down_write of mmap_sem here: a racing
	 * do_huge_pmd_wp_page() might have copied-on-write to another
	 * huge page before our split_huge_page() got the anon_vma lock.
	 */
	if (unlikely(pmd_trans_huge(*pmd)))
		goto again;
}
void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd)
{
	struct vm_area_struct *vma;

	vma = find_vma(mm, address);
	BUG_ON(vma == NULL);
	split_huge_page_pmd(vma, address, pmd);
}
static void split_huge_page_address(struct mm_struct *mm,
				    unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;
	/*
	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
	 * materialize from under us.
	 */
	split_huge_page_pmd_mm(mm, address, pmd);
}
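/*
 * Called when vma boundaries are adjusted: a huge pmd cannot straddle a
 * vma boundary, so split any huge pmd that the new start, end or
 * vm_next->vm_start would fall inside of.
 */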
void __vma_adjust_trans_huge(struct vm_area_struct *vma,
			     unsigned long start,
			     unsigned long end,
			     long adjust_next)
{
	/*
	 * If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (start & ~HPAGE_PMD_MASK &&
	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, start);

	/*
	 * If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
	 */
	if (end & ~HPAGE_PMD_MASK &&
	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
		split_huge_page_address(vma->vm_mm, end);

	/*
	 * If we're also updating the vma->vm_next->vm_start, if the new
	 * vm_next->vm_start isn't hpage aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
	 */
	if (adjust_next > 0) {
		struct vm_area_struct *next = vma->vm_next;
		unsigned long nstart = next->vm_start;
		nstart += adjust_next << PAGE_SHIFT;
		if (nstart & ~HPAGE_PMD_MASK &&
		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
			split_huge_page_address(next->vm_mm, nstart);
	}
}