hugetlbpage.c

/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>

#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K 16
#define PAGE_SHIFT_16M 24
#define PAGE_SHIFT_16G 34

unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready. On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array. FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES 128
struct psize_gpages {
        u64 gpage_list[MAX_NUMBER_GPAGES];
        unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES 1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif

#define hugepd_none(hpd) ((hpd).pd == 0)

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
        /* Only called for hugetlbfs pages, hence can ignore THP */
        return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
}
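
/*
 * Descriptive note (not in the original source): __hugepte_alloc() below
 * allocates a hugepte table for the given hugepage size and installs a
 * pointer to it in the page-directory entry -- or, on FSL Book3E, in each
 * of the directory entries that cover the hugepage. It returns 0 on
 * success (including when another thread raced and populated the entry
 * first) and -ENOMEM if the backing table cannot be allocated.
 */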
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
                           unsigned long address, unsigned pdshift, unsigned pshift)
{
        struct kmem_cache *cachep;
        pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
        int i;
        int num_hugepd = 1 << (pshift - pdshift);
        cachep = hugepte_cache;
#else
        cachep = PGT_CACHE(pdshift - pshift);
#endif

        new = kmem_cache_zalloc(cachep, GFP_KERNEL|__GFP_REPEAT);

        BUG_ON(pshift > HUGEPD_SHIFT_MASK);
        BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

        if (!new)
                return -ENOMEM;

        spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
        /*
         * We have multiple higher-level entries that point to the same
         * actual pte location. Fill in each as we go and backtrack on error.
         * We need all of these so the DTLB pgtable walk code can find the
         * right higher-level entry without knowing if it's a hugepage or not.
         */
        for (i = 0; i < num_hugepd; i++, hpdp++) {
                if (unlikely(!hugepd_none(*hpdp)))
                        break;
                else
                        /* We use the old format for PPC_FSL_BOOK3E */
                        hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
        }
        /* If we bailed from the for loop early, an error occurred, clean up */
        if (i < num_hugepd) {
                for (i = i - 1; i >= 0; i--, hpdp--)
                        hpdp->pd = 0;
                kmem_cache_free(cachep, new);
        }
#else
        if (!hugepd_none(*hpdp))
                kmem_cache_free(cachep, new);
        else {
#ifdef CONFIG_PPC_BOOK3S_64
                hpdp->pd = __pa(new) | (shift_to_mmu_psize(pshift) << 2);
#else
                hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
        }
#endif
        spin_unlock(&mm->page_table_lock);
        return 0;
}
/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;

        addr &= ~(sz-1);
        pg = pgd_offset(mm, addr);

        if (pshift == PGDIR_SHIFT)
                /* 16GB huge page */
                return (pte_t *) pg;
        else if (pshift > PUD_SHIFT)
                /*
                 * We need to use hugepd table
                 */
                hpdp = (hugepd_t *)pg;
        else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift == PUD_SHIFT)
                        return (pte_t *)pu;
                else if (pshift > PMD_SHIFT)
                        hpdp = (hugepd_t *)pu;
                else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        if (pshift == PMD_SHIFT)
                                /* 16MB hugepage */
                                return (pte_t *)pm;
                        else
                                hpdp = (hugepd_t *)pm;
                }
        }
        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
                return NULL;

        return hugepte_offset(*hpdp, addr, pdshift);
}

#else

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
        pgd_t *pg;
        pud_t *pu;
        pmd_t *pm;
        hugepd_t *hpdp = NULL;
        unsigned pshift = __ffs(sz);
        unsigned pdshift = PGDIR_SHIFT;

        addr &= ~(sz-1);
        pg = pgd_offset(mm, addr);

        if (pshift >= HUGEPD_PGD_SHIFT) {
                hpdp = (hugepd_t *)pg;
        } else {
                pdshift = PUD_SHIFT;
                pu = pud_alloc(mm, pg, addr);
                if (pshift >= HUGEPD_PUD_SHIFT) {
                        hpdp = (hugepd_t *)pu;
                } else {
                        pdshift = PMD_SHIFT;
                        pm = pmd_alloc(mm, pu, addr);
                        hpdp = (hugepd_t *)pm;
                }
        }
        if (!hpdp)
                return NULL;

        BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

        if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
                return NULL;

        return hugepte_offset(*hpdp, addr, pdshift);
}
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages. This function is used in early
 * boot before the buddy allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
        int i;

        if (addr == 0)
                return;

        gpage_freearray[idx].nr_gpages = number_of_pages;

        for (i = 0; i < number_of_pages; i++) {
                gpage_freearray[idx].gpage_list[i] = addr;
                addr += page_size;
        }
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        int idx = shift_to_mmu_psize(huge_page_shift(hstate));
        int nr_gpages = gpage_freearray[idx].nr_gpages;

        if (nr_gpages == 0)
                return 0;

#ifdef CONFIG_HIGHMEM
        /*
         * If gpages can be in highmem we can't use the trick of storing the
         * data structure in the page; allocate space for this
         */
        m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0);
        m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
        m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

        list_add(&m->list, &huge_boot_pages);
        gpage_freearray[idx].nr_gpages = nr_gpages;
        gpage_freearray[idx].gpage_list[nr_gpages] = 0;
        m->hstate = hstate;

        return 1;
}

/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */
unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val,
                                       const char *unused, void *arg)
{
        static phys_addr_t size;
        unsigned long npages;

        /*
         * The hugepagesz and hugepages cmdline options are interleaved. We
         * use the size variable to keep track of whether or not this was done
         * properly and skip over instances where it is incorrect. Other
         * command-line parsing code will issue warnings, so we don't need to.
         */
        if ((strcmp(param, "default_hugepagesz") == 0) ||
            (strcmp(param, "hugepagesz") == 0)) {
                size = memparse(val, NULL);
        } else if (strcmp(param, "hugepages") == 0) {
                if (size != 0) {
                        if (sscanf(val, "%lu", &npages) <= 0)
                                npages = 0;
                        if (npages > MAX_NUMBER_GPAGES) {
                                pr_warn("MMU: %lu pages requested for page "
                                        "size %llu KB, limiting to "
                                        __stringify(MAX_NUMBER_GPAGES) "\n",
                                        npages, size / 1024);
                                npages = MAX_NUMBER_GPAGES;
                        }
                        gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
                        size = 0;
                }
        }
        return 0;
}

/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle. We want to allocate these in highmem because
 * the amount of lowmem is limited. This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the lmb
 * allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
        static __initdata char cmdline[COMMAND_LINE_SIZE];
        phys_addr_t size, base;
        int i;

        strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
        parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
                   NULL, &do_gpage_early_setup);

        /*
         * Walk gpage list in reverse, allocating larger page sizes first.
         * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
         * When we reach the point in the list where pages are no longer
         * considered gpages, we're done.
         */
        for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
                if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
                        continue;
                else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
                        break;

                size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
                base = memblock_alloc_base(size * gpage_npages[i], size,
                                           MEMBLOCK_ALLOC_ANYWHERE);
                add_gpage(base, size, gpage_npages[i]);
        }
}
#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages. This function is used in early
 * boot before the buddy allocator is set up.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
        if (!addr)
                return;
        while (number_of_pages > 0) {
                gpage_freearray[nr_gpages] = addr;
                nr_gpages++;
                number_of_pages--;
                addr += page_size;
        }
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
        struct huge_bootmem_page *m;
        if (nr_gpages == 0)
                return 0;
        m = phys_to_virt(gpage_freearray[--nr_gpages]);
        gpage_freearray[nr_gpages] = 0;
        list_add(&m->list, &huge_boot_pages);
        m->hstate = hstate;
        return 1;
}
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_FREELIST_SIZE \
        ((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
        struct rcu_head rcu;
        unsigned int index;
        void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
        struct hugepd_freelist *batch =
                container_of(head, struct hugepd_freelist, rcu);
        unsigned int i;

        for (i = 0; i < batch->index; i++)
                kmem_cache_free(hugepte_cache, batch->ptes[i]);

        free_page((unsigned long)batch);
}
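
/*
 * Descriptive note (not in the original source): hugepd_free() below frees
 * a hugepte table after it has been unhooked from the page tables. If no
 * other CPU can be walking this mm, the table is freed immediately;
 * otherwise it is appended to a per-cpu batch that is handed to
 * call_rcu_sched() once the batch page fills, so that concurrent lockless
 * page-table walkers (running with interrupts disabled) never see the
 * memory reused under them.
 */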
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
        struct hugepd_freelist **batchp;

        batchp = &get_cpu_var(hugepd_freelist_cur);

        if (atomic_read(&tlb->mm->mm_users) < 2 ||
            cpumask_equal(mm_cpumask(tlb->mm),
                          cpumask_of(smp_processor_id()))) {
                kmem_cache_free(hugepte_cache, hugepte);
                put_cpu_var(hugepd_freelist_cur);
                return;
        }

        if (*batchp == NULL) {
                *batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
                (*batchp)->index = 0;
        }

        (*batchp)->ptes[(*batchp)->index++] = hugepte;
        if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
                call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
                *batchp = NULL;
        }
        put_cpu_var(hugepd_freelist_cur);
}
#endif
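
/*
 * Descriptive note (not in the original source): free_hugepd_range() below
 * clears the hugepd entry at *hpdp (or, on FSL Book3E, the run of identical
 * entries covering the hugepage) and frees the hugepte table it points to,
 * but only when the range being torn down spans the whole directory entry
 * and stays within the floor/ceiling limits, mirroring the logic of the
 * generic free_pmd_range()/free_pud_range() helpers.
 */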
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
                              unsigned long start, unsigned long end,
                              unsigned long floor, unsigned long ceiling)
{
        pte_t *hugepte = hugepd_page(*hpdp);
        int i;

        unsigned long pdmask = ~((1UL << pdshift) - 1);
        unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
        /* Note: On fsl the hpdp may be the first of several */
        num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
        unsigned int shift = hugepd_shift(*hpdp);
#endif

        start &= pdmask;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= pdmask;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        for (i = 0; i < num_hugepd; i++, hpdp++)
                hpdp->pd = 0;

#ifdef CONFIG_PPC_FSL_BOOK3E
        hugepd_free(tlb, hugepte);
#else
        pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pmd_t *pmd;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
                        /*
                         * If it is not a hugepd pointer, we should already
                         * find it cleared.
                         */
                        WARN_ON(!pmd_none_or_clear_bad(pmd));
                        continue;
                }
#ifdef CONFIG_PPC_FSL_BOOK3E
                /*
                 * Increment next by the size of the huge mapping since
                 * there may be more than one entry at this level for a
                 * single hugepage, but all of them point to
                 * the same kmem cache that holds the hugepte.
                 */
                next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
                free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
                                  addr, next, floor, ceiling);
        } while (addr = next, addr != end);

        start &= PUD_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PUD_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pmd = pmd_offset(pud, start);
        pud_clear(pud);
        pmd_free_tlb(tlb, pmd, start);
        mm_dec_nr_pmds(tlb->mm);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
                                   unsigned long addr, unsigned long end,
                                   unsigned long floor, unsigned long ceiling)
{
        pud_t *pud;
        unsigned long next;
        unsigned long start;

        start = addr;
        do {
                pud = pud_offset(pgd, addr);
                next = pud_addr_end(addr, end);
                if (!is_hugepd(__hugepd(pud_val(*pud)))) {
                        if (pud_none_or_clear_bad(pud))
                                continue;
                        hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
                                               ceiling);
                } else {
#ifdef CONFIG_PPC_FSL_BOOK3E
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at this level for a
                         * single hugepage, but all of them point to
                         * the same kmem cache that holds the hugepte.
                         */
                        next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
                        free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);

        start &= PGDIR_MASK;
        if (start < floor)
                return;
        if (ceiling) {
                ceiling &= PGDIR_MASK;
                if (!ceiling)
                        return;
        }
        if (end - 1 > ceiling - 1)
                return;

        pud = pud_offset(pgd, start);
        pgd_clear(pgd);
        pud_free_tlb(tlb, pud, start);
}

/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                            unsigned long addr, unsigned long end,
                            unsigned long floor, unsigned long ceiling)
{
        pgd_t *pgd;
        unsigned long next;

        /*
         * Because there are a number of different possible pagetable
         * layouts for hugepage ranges, we limit knowledge of how
         * things should be laid out to the allocation path
         * (huge_pte_alloc(), above). Everything else works out the
         * structure as it goes from information in the hugepd
         * pointers. That means that we can't here use the
         * optimization used in the normal page free_pgd_range(), of
         * checking whether we're actually covering a large enough
         * range to have to do anything at the top level of the walk
         * instead of at the bottom.
         *
         * To make sense of this, you should probably go read the big
         * block comment at the top of the normal free_pgd_range(),
         * too.
         */
        do {
                next = pgd_addr_end(addr, end);
                pgd = pgd_offset(tlb->mm, addr);
                if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
                        if (pgd_none_or_clear_bad(pgd))
                                continue;
                        hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
                } else {
#ifdef CONFIG_PPC_FSL_BOOK3E
                        /*
                         * Increment next by the size of the huge mapping since
                         * there may be more than one entry at the pgd level
                         * for a single hugepage, but all of them point to the
                         * same kmem cache that holds the hugepte.
                         */
                        next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
                        free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
                                          addr, next, floor, ceiling);
                }
        } while (addr = next, addr != end);
}
/*
 * We are holding mmap_sem, so a parallel huge page collapse cannot run.
 * To prevent hugepage split, disable irq.
 */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
        bool is_thp;
        pte_t *ptep, pte;
        unsigned shift;
        unsigned long mask, flags;
        struct page *page = ERR_PTR(-EINVAL);

        local_irq_save(flags);

        ptep = find_linux_pte_or_hugepte(mm->pgd, address, &is_thp, &shift);
        if (!ptep)
                goto no_page;
        pte = READ_ONCE(*ptep);
        /*
         * Verify it is a huge page else bail.
         * Transparent hugepages are handled by generic code. We can skip them
         * here.
         */
        if (!shift || is_thp)
                goto no_page;

        if (!pte_present(pte)) {
                page = NULL;
                goto no_page;
        }
        mask = (1UL << shift) - 1;
        page = pte_page(pte);
        if (page)
                page += (address & mask) / PAGE_SIZE;

no_page:
        local_irq_restore(flags);
        return page;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
                pmd_t *pmd, int write)
{
        BUG();
        return NULL;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
                pud_t *pud, int write)
{
        BUG();
        return NULL;
}
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
                                      unsigned long sz)
{
        unsigned long __boundary = (addr + sz) & ~(sz-1);
        return (__boundary - 1 < end - 1) ? __boundary : end;
}
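
/*
 * Descriptive note (not in the original source): gup_huge_pd() below is the
 * get_user_pages_fast() helper for hugepd entries. It walks every hugepte
 * covered by the hugepd for [addr, end) and hands each one to gup_hugepte().
 * It returns 1 if the whole range was pinned and 0 if any entry could not be
 * handled, which typically makes the caller fall back to the slow gup path.
 */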
int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
                unsigned long end, int write, struct page **pages, int *nr)
{
        pte_t *ptep;
        unsigned long sz = 1UL << hugepd_shift(hugepd);
        unsigned long next;

        ptep = hugepte_offset(hugepd, addr, pdshift);
        do {
                next = hugepte_addr_end(addr, end, sz);
                if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
                        return 0;
        } while (ptep++, addr = next, addr != end);

        return 1;
}
#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        struct hstate *hstate = hstate_file(file);
        int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

        return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
        unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);

        return 1UL << mmu_psize_to_shift(psize);
#else
        if (!is_vm_hugetlb_page(vma))
                return PAGE_SIZE;

        return huge_page_size(hstate_vma(vma));
#endif
}
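
/*
 * Descriptive note (not in the original source): FSL Book3E MMUs only
 * implement page sizes that are powers of 4 (4K, 16K, 64K, 256K, 1M, 4M,
 * ...), so add_huge_page_size() below uses this helper to reject any other
 * size on those platforms.
 */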
static inline bool is_power_of_4(unsigned long x)
{
        if (is_power_of_2(x))
                return (__ilog2(x) % 2) ? false : true;
        return false;
}

static int __init add_huge_page_size(unsigned long long size)
{
        int shift = __ffs(size);
        int mmu_psize;

        /* Check that it is a page size supported by the hardware and
         * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
        if ((size < PAGE_SIZE) || !is_power_of_4(size))
                return -EINVAL;
#else
        if (!is_power_of_2(size)
            || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
                return -EINVAL;
#endif

        if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
                return -EINVAL;

        BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

        /* Return if huge page size has already been setup */
        if (size_to_hstate(size))
                return 0;

        hugetlb_add_hstate(shift - PAGE_SHIFT);

        return 0;
}

static int __init hugepage_setup_sz(char *str)
{
        unsigned long long size;

        size = memparse(str, &str);

        if (add_huge_page_size(size) != 0) {
                hugetlb_bad_size();
                pr_err("Invalid huge page size specified(%llu)\n", size);
        }

        return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
        int psize;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

                /* Don't treat normal page sizes as huge... */
                if (shift != PAGE_SHIFT)
                        if (add_huge_page_size(1ULL << shift) < 0)
                                continue;
        }

        /*
         * Create a kmem cache for hugeptes. The bottom bits in the pte have
         * size information encoded in them, so align them to allow this
         */
        hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
                                          HUGEPD_SHIFT_MASK + 1, 0, NULL);
        if (hugepte_cache == NULL)
                panic("%s: Unable to create kmem cache for hugeptes\n",
                      __func__);

        /* Default hpage size = 4M */
        if (mmu_psize_defs[MMU_PAGE_4M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
        else
                panic("%s: Unable to set default huge page size\n", __func__);

        return 0;
}
#else
static int __init hugetlbpage_init(void)
{
        int psize;

        if (!mmu_has_feature(MMU_FTR_16M_PAGE))
                return -ENODEV;

        for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
                unsigned shift;
                unsigned pdshift;

                if (!mmu_psize_defs[psize].shift)
                        continue;

                shift = mmu_psize_to_shift(psize);

                if (add_huge_page_size(1ULL << shift) < 0)
                        continue;

                if (shift < PMD_SHIFT)
                        pdshift = PMD_SHIFT;
                else if (shift < PUD_SHIFT)
                        pdshift = PUD_SHIFT;
                else
                        pdshift = PGDIR_SHIFT;
                /*
                 * If pdshift and shift are the same, we don't use the
                 * pgtable cache for the hugepd.
                 */
                if (pdshift != shift) {
                        pgtable_cache_add(pdshift - shift, NULL);
                        if (!PGT_CACHE(pdshift - shift))
                                panic("hugetlbpage_init(): could not create "
                                      "pgtable cache for %d bit pagesize\n", shift);
                }
        }

        /* Set default large page size. Currently, we pick 16M or 1M
         * depending on what is available
         */
        if (mmu_psize_defs[MMU_PAGE_16M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
        else if (mmu_psize_defs[MMU_PAGE_1M].shift)
                HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;

        return 0;
}
#endif
arch_initcall(hugetlbpage_init);
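
/*
 * Descriptive note (not in the original source): flush_dcache_icache_hugepage()
 * below flushes the data and instruction caches for every subpage of a
 * compound hugepage, using kmap_atomic() for highmem subpages that have no
 * permanent kernel mapping.
 */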
void flush_dcache_icache_hugepage(struct page *page)
{
        int i;
        void *start;

        BUG_ON(!PageCompound(page));

        for (i = 0; i < (1UL << compound_order(page)); i++) {
                if (!PageHighMem(page)) {
                        __flush_dcache_icache(page_address(page+i));
                } else {
                        start = kmap_atomic(page+i);
                        __flush_dcache_icache(start);
                        kunmap_atomic(start);
                }
        }
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown; we can follow the address down to the page and take a ref on it.
 * This function needs to be called with interrupts disabled. We use this
 * variant when we have MSR[EE] = 0 but paca->soft_enabled = 1.
 */
pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
                                   bool *is_thp, unsigned *shift)
{
        pgd_t pgd, *pgdp;
        pud_t pud, *pudp;
        pmd_t pmd, *pmdp;
        pte_t *ret_pte;
        hugepd_t *hpdp = NULL;
        unsigned pdshift = PGDIR_SHIFT;

        if (shift)
                *shift = 0;

        if (is_thp)
                *is_thp = false;

        pgdp = pgdir + pgd_index(ea);
        pgd = READ_ONCE(*pgdp);
        /*
         * Always operate on the local stack value. This makes sure the
         * value doesn't get updated by a parallel THP split/collapse,
         * page fault or page unmap. The returned pte_t * is still not
         * stable, so the caller must re-check it for the above conditions.
         */
        if (pgd_none(pgd))
                return NULL;
        else if (pgd_huge(pgd)) {
                ret_pte = (pte_t *) pgdp;
                goto out;
        } else if (is_hugepd(__hugepd(pgd_val(pgd))))
                hpdp = (hugepd_t *)&pgd;
        else {
                /*
                 * Even if we end up with an unmap, the pgtable will not
                 * be freed, because we do an rcu free and here we are
                 * irq disabled
                 */
                pdshift = PUD_SHIFT;
                pudp = pud_offset(&pgd, ea);
                pud = READ_ONCE(*pudp);

                if (pud_none(pud))
                        return NULL;
                else if (pud_huge(pud)) {
                        ret_pte = (pte_t *) pudp;
                        goto out;
                } else if (is_hugepd(__hugepd(pud_val(pud))))
                        hpdp = (hugepd_t *)&pud;
                else {
                        pdshift = PMD_SHIFT;
                        pmdp = pmd_offset(&pud, ea);
                        pmd = READ_ONCE(*pmdp);
                        /*
                         * A hugepage collapse is captured by pmd_none, because
                         * it marks the pmd none and does a hpte invalidate.
                         */
                        if (pmd_none(pmd))
                                return NULL;

                        if (pmd_trans_huge(pmd)) {
                                if (is_thp)
                                        *is_thp = true;
                                ret_pte = (pte_t *) pmdp;
                                goto out;
                        }

                        if (pmd_huge(pmd)) {
                                ret_pte = (pte_t *) pmdp;
                                goto out;
                        } else if (is_hugepd(__hugepd(pmd_val(pmd))))
                                hpdp = (hugepd_t *)&pmd;
                        else
                                return pte_offset_kernel(&pmd, ea);
                }
        }
        if (!hpdp)
                return NULL;

        ret_pte = hugepte_offset(*hpdp, ea, pdshift);
        pdshift = hugepd_shift(*hpdp);
out:
        if (shift)
                *shift = pdshift;
        return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
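
/*
 * Descriptive note (not in the original source): gup_hugepte() below does a
 * lockless get_user_pages_fast() on a single hugepte. It checks that the pte
 * is present (and writable if requested), speculatively takes references on
 * the covered subpages, and then re-reads the pte; if it changed under us,
 * the references are dropped and 0 is returned so the caller can retry via
 * the slow path.
 */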
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask;
        unsigned long pte_end;
        struct page *head, *page;
        pte_t pte;
        int refs;

        pte_end = (addr + sz) & ~(sz-1);
        if (pte_end < end)
                end = pte_end;

        pte = READ_ONCE(*ptep);
        mask = _PAGE_PRESENT | _PAGE_USER;
        if (write)
                mask |= _PAGE_RW;

        if ((pte_val(pte) & mask) != mask)
                return 0;

        /* hugepages are never "special" */
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);

        page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (addr += PAGE_SIZE, addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }

        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /* Could be optimized better */
                *nr -= refs;
                while (refs--)
                        put_page(head);
                return 0;
        }

        return 1;
}