hugetlbpage.c

/*
 * PPC Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 * Copyright (C) 2011 Becky Bruce, Freescale Semiconductor
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/export.h>
#include <linux/of_fdt.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/moduleparam.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/hugetlb.h>

#ifdef CONFIG_HUGETLB_PAGE

#define PAGE_SHIFT_64K	16
#define PAGE_SHIFT_16M	24
#define PAGE_SHIFT_16G	34

unsigned int HPAGE_SHIFT;

/*
 * Tracks gpages after the device tree is scanned and before the
 * huge_boot_pages list is ready.  On non-Freescale implementations, this is
 * just used to track 16G pages and so is a single array.  FSL-based
 * implementations may have more than one gpage size, so we need multiple
 * arrays.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define MAX_NUMBER_GPAGES	128
struct psize_gpages {
	u64 gpage_list[MAX_NUMBER_GPAGES];
	unsigned int nr_gpages;
};
static struct psize_gpages gpage_freearray[MMU_PAGE_COUNT];
#else
#define MAX_NUMBER_GPAGES	1024
static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif

#define hugepd_none(hpd)	((hpd).pd == 0)

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	/* Only called for hugetlbfs pages, hence can ignore THP */
	return __find_linux_pte_or_hugepte(mm->pgd, addr, NULL, NULL);
}
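
/*
 * Allocate a hugepte table and install a pointer to it in the page-table
 * entry (or, on FSL Book3E, the run of entries) described by hpdp.
 * Returns 0 on success or -ENOMEM if the table cannot be allocated.
 */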
static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
			   unsigned long address, unsigned pdshift, unsigned pshift)
{
	struct kmem_cache *cachep;
	pte_t *new;

#ifdef CONFIG_PPC_FSL_BOOK3E
	int i;
	int num_hugepd = 1 << (pshift - pdshift);
	cachep = hugepte_cache;
#else
	cachep = PGT_CACHE(pdshift - pshift);
#endif

	new = kmem_cache_zalloc(cachep, GFP_KERNEL);

	BUG_ON(pshift > HUGEPD_SHIFT_MASK);
	BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);

	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifdef CONFIG_PPC_FSL_BOOK3E
	/*
	 * We have multiple higher-level entries that point to the same
	 * actual pte location.  Fill in each as we go and backtrack on error.
	 * We need all of these so the DTLB pgtable walk code can find the
	 * right higher-level entry without knowing if it's a hugepage or not.
	 */
	for (i = 0; i < num_hugepd; i++, hpdp++) {
		if (unlikely(!hugepd_none(*hpdp)))
			break;
		else
			/* We use the old format for PPC_FSL_BOOK3E */
			hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
	}
	/* If we bailed from the for loop early, an error occurred, clean up */
	if (i < num_hugepd) {
		for (i = i - 1; i >= 0; i--, hpdp--)
			hpdp->pd = 0;
		kmem_cache_free(cachep, new);
	}
#else
	if (!hugepd_none(*hpdp))
		kmem_cache_free(cachep, new);
	else {
#ifdef CONFIG_PPC_BOOK3S_64
		hpdp->pd = __pa(new) | (shift_to_mmu_psize(pshift) << 2);
#else
		hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
#endif
	}
#endif
	spin_unlock(&mm->page_table_lock);
	return 0;
}
/*
 * These macros define how to determine which level of the page table holds
 * the hpdp.
 */
#ifdef CONFIG_PPC_FSL_BOOK3E
#define HUGEPD_PGD_SHIFT PGDIR_SHIFT
#define HUGEPD_PUD_SHIFT PUD_SHIFT
#else
#define HUGEPD_PGD_SHIFT PUD_SHIFT
#define HUGEPD_PUD_SHIFT PMD_SHIFT
#endif

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * At this point we do the placement change only for BOOK3S 64. This would
 * possibly work on other subarchs.
 */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

	if (pshift == PGDIR_SHIFT)
		/* 16GB huge page */
		return (pte_t *) pg;
	else if (pshift > PUD_SHIFT)
		/*
		 * We need to use hugepd table
		 */
		hpdp = (hugepd_t *)pg;
	else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift == PUD_SHIFT)
			return (pte_t *)pu;
		else if (pshift > PMD_SHIFT)
			hpdp = (hugepd_t *)pu;
		else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			if (pshift == PMD_SHIFT)
				/* 16MB hugepage */
				return (pte_t *)pm;
			else
				hpdp = (hugepd_t *)pm;
		}
	}
	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}

#else

pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	pgd_t *pg;
	pud_t *pu;
	pmd_t *pm;
	hugepd_t *hpdp = NULL;
	unsigned pshift = __ffs(sz);
	unsigned pdshift = PGDIR_SHIFT;

	addr &= ~(sz-1);
	pg = pgd_offset(mm, addr);

	if (pshift >= HUGEPD_PGD_SHIFT) {
		hpdp = (hugepd_t *)pg;
	} else {
		pdshift = PUD_SHIFT;
		pu = pud_alloc(mm, pg, addr);
		if (pshift >= HUGEPD_PUD_SHIFT) {
			hpdp = (hugepd_t *)pu;
		} else {
			pdshift = PMD_SHIFT;
			pm = pmd_alloc(mm, pu, addr);
			hpdp = (hugepd_t *)pm;
		}
	}

	if (!hpdp)
		return NULL;

	BUG_ON(!hugepd_none(*hpdp) && !hugepd_ok(*hpdp));

	if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
		return NULL;

	return hugepte_offset(*hpdp, addr, pdshift);
}
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E
/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	unsigned int idx = shift_to_mmu_psize(__ffs(page_size));
	int i;

	if (addr == 0)
		return;

	gpage_freearray[idx].nr_gpages = number_of_pages;

	for (i = 0; i < number_of_pages; i++) {
		gpage_freearray[idx].gpage_list[i] = addr;
		addr += page_size;
	}
}

/*
 * Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	int idx = shift_to_mmu_psize(huge_page_shift(hstate));
	int nr_gpages = gpage_freearray[idx].nr_gpages;

	if (nr_gpages == 0)
		return 0;

#ifdef CONFIG_HIGHMEM
	/*
	 * If gpages can be in highmem we can't use the trick of storing the
	 * data structure in the page; allocate space for this
	 */
	m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0);
	m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
#else
	m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
#endif

	list_add(&m->list, &huge_boot_pages);
	gpage_freearray[idx].nr_gpages = nr_gpages;
	gpage_freearray[idx].gpage_list[nr_gpages] = 0;
	m->hstate = hstate;

	return 1;
}

/*
 * Scan the command line hugepagesz= options for gigantic pages; store those in
 * a list that we use to allocate the memory once all options are parsed.
 */
unsigned long gpage_npages[MMU_PAGE_COUNT];

static int __init do_gpage_early_setup(char *param, char *val,
				       const char *unused, void *arg)
{
	static phys_addr_t size;
	unsigned long npages;

	/*
	 * The hugepagesz and hugepages cmdline options are interleaved.  We
	 * use the size variable to keep track of whether or not this was done
	 * properly and skip over instances where it is incorrect.  Other
	 * command-line parsing code will issue warnings, so we don't need to.
	 */
	if ((strcmp(param, "default_hugepagesz") == 0) ||
	    (strcmp(param, "hugepagesz") == 0)) {
		size = memparse(val, NULL);
	} else if (strcmp(param, "hugepages") == 0) {
		if (size != 0) {
			if (sscanf(val, "%lu", &npages) <= 0)
				npages = 0;
			if (npages > MAX_NUMBER_GPAGES) {
				pr_warn("MMU: %lu pages requested for page "
					"size %llu KB, limiting to "
					__stringify(MAX_NUMBER_GPAGES) "\n",
					npages, size / 1024);
				npages = MAX_NUMBER_GPAGES;
			}
			gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
			size = 0;
		}
	}
	return 0;
}
/*
 * This function allocates physical space for pages that are larger than the
 * buddy allocator can handle.  We want to allocate these in highmem because
 * the amount of lowmem is limited.  This means that this function MUST be
 * called before lowmem_end_addr is set up in MMU_init() in order for the
 * memblock allocator to grab highmem.
 */
void __init reserve_hugetlb_gpages(void)
{
	static __initdata char cmdline[COMMAND_LINE_SIZE];
	phys_addr_t size, base;
	int i;

	strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
		   NULL, &do_gpage_early_setup);

	/*
	 * Walk gpage list in reverse, allocating larger page sizes first.
	 * Skip over unsupported sizes, or sizes that have 0 gpages allocated.
	 * When we reach the point in the list where pages are no longer
	 * considered gpages, we're done.
	 */
	for (i = MMU_PAGE_COUNT-1; i >= 0; i--) {
		if (mmu_psize_defs[i].shift == 0 || gpage_npages[i] == 0)
			continue;
		else if (mmu_psize_to_shift(i) < (MAX_ORDER + PAGE_SHIFT))
			break;

		size = (phys_addr_t)(1ULL << mmu_psize_to_shift(i));
		base = memblock_alloc_base(size * gpage_npages[i], size,
					   MEMBLOCK_ALLOC_ANYWHERE);
		add_gpage(base, size, gpage_npages[i]);
	}
}

#else /* !PPC_FSL_BOOK3E */

/* Build list of addresses of gigantic pages.  This function is used in early
 * boot before the buddy allocator is setup.
 */
void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
{
	if (!addr)
		return;
	while (number_of_pages > 0) {
		gpage_freearray[nr_gpages] = addr;
		nr_gpages++;
		number_of_pages--;
		addr += page_size;
	}
}

/* Moves the gigantic page addresses from the temporary list to the
 * huge_boot_pages list.
 */
int alloc_bootmem_huge_page(struct hstate *hstate)
{
	struct huge_bootmem_page *m;
	if (nr_gpages == 0)
		return 0;
	m = phys_to_virt(gpage_freearray[--nr_gpages]);
	gpage_freearray[nr_gpages] = 0;
	list_add(&m->list, &huge_boot_pages);
	m->hstate = hstate;
	return 1;
}
#endif
#ifdef CONFIG_PPC_FSL_BOOK3E

#define HUGEPD_FREELIST_SIZE \
	((PAGE_SIZE - sizeof(struct hugepd_freelist)) / sizeof(pte_t))

struct hugepd_freelist {
	struct rcu_head	rcu;
	unsigned int index;
	void *ptes[0];
};

static DEFINE_PER_CPU(struct hugepd_freelist *, hugepd_freelist_cur);

static void hugepd_free_rcu_callback(struct rcu_head *head)
{
	struct hugepd_freelist *batch =
		container_of(head, struct hugepd_freelist, rcu);
	unsigned int i;

	for (i = 0; i < batch->index; i++)
		kmem_cache_free(hugepte_cache, batch->ptes[i]);

	free_page((unsigned long)batch);
}
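
/*
 * Free a hugepte table.  If other CPUs may still be walking this mm's page
 * tables, batch the table on a per-cpu freelist and defer the actual free
 * to an RCU grace period; otherwise free it immediately.
 */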
static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
{
	struct hugepd_freelist **batchp;

	batchp = &get_cpu_var(hugepd_freelist_cur);

	if (atomic_read(&tlb->mm->mm_users) < 2 ||
	    cpumask_equal(mm_cpumask(tlb->mm),
			  cpumask_of(smp_processor_id()))) {
		kmem_cache_free(hugepte_cache, hugepte);
		put_cpu_var(hugepd_freelist_cur);
		return;
	}

	if (*batchp == NULL) {
		*batchp = (struct hugepd_freelist *)__get_free_page(GFP_ATOMIC);
		(*batchp)->index = 0;
	}

	(*batchp)->ptes[(*batchp)->index++] = hugepte;
	if ((*batchp)->index == HUGEPD_FREELIST_SIZE) {
		call_rcu_sched(&(*batchp)->rcu, hugepd_free_rcu_callback);
		*batchp = NULL;
	}
	put_cpu_var(hugepd_freelist_cur);
}
#endif
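
/*
 * Clear the hugepd entry (or the run of identical entries on FSL Book3E)
 * and free the hugepte table it points to, provided the range being torn
 * down actually covers it; the floor/ceiling checks mirror the generic
 * free_p*_range() helpers.
 */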
static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshift,
			      unsigned long start, unsigned long end,
			      unsigned long floor, unsigned long ceiling)
{
	pte_t *hugepte = hugepd_page(*hpdp);
	int i;

	unsigned long pdmask = ~((1UL << pdshift) - 1);
	unsigned int num_hugepd = 1;

#ifdef CONFIG_PPC_FSL_BOOK3E
	/* Note: On fsl the hpdp may be the first of several */
	num_hugepd = (1 << (hugepd_shift(*hpdp) - pdshift));
#else
	unsigned int shift = hugepd_shift(*hpdp);
#endif

	start &= pdmask;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= pdmask;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	for (i = 0; i < num_hugepd; i++, hpdp++)
		hpdp->pd = 0;

#ifdef CONFIG_PPC_FSL_BOOK3E
	hugepd_free(tlb, hugepte);
#else
	pgtable_free_tlb(tlb, hugepte, pdshift - shift);
#endif
}
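
/*
 * Walk the pmd level of a range, freeing any hugepd tables found, and then
 * free the pmd page itself once the floor/ceiling checks show the whole
 * pud entry is covered (cf. the generic free_pmd_range()).
 */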
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pmd = pmd_offset(pud, addr);
		next = pmd_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
			/*
			 * If it is not a hugepd pointer, we should have
			 * already found it cleared.
			 */
			WARN_ON(!pmd_none_or_clear_bad(pmd));
			continue;
		}
#ifdef CONFIG_PPC_FSL_BOOK3E
		/*
		 * Increment next by the size of the huge mapping since
		 * there may be more than one entry at this level for a
		 * single hugepage, but all of them point to
		 * the same kmem cache that holds the hugepte.
		 */
		next = addr + (1 << hugepd_shift(*(hugepd_t *)pmd));
#endif
		free_hugepd_range(tlb, (hugepd_t *)pmd, PMD_SHIFT,
				  addr, next, floor, ceiling);
	} while (addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}
static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	do {
		pud = pud_offset(pgd, addr);
		next = pud_addr_end(addr, end);
		if (!is_hugepd(__hugepd(pud_val(*pud)))) {
			if (pud_none_or_clear_bad(pud))
				continue;
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at this level for a
			 * single hugepage, but all of them point to
			 * the same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pud));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pud, PUD_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(pgd, start);
	pgd_clear(pgd);
	pud_free_tlb(tlb, pud, start);
}
/*
 * This function frees user-level page tables of a process.
 */
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	unsigned long next;

	/*
	 * Because there are a number of different possible pagetable
	 * layouts for hugepage ranges, we limit knowledge of how
	 * things should be laid out to the allocation path
	 * (huge_pte_alloc(), above).  Everything else works out the
	 * structure as it goes from information in the hugepd
	 * pointers.  That means that we can't use here the
	 * optimization used in the normal page free_pgd_range(), of
	 * checking whether we're actually covering a large enough
	 * range to have to do anything at the top level of the walk
	 * instead of at the bottom.
	 *
	 * To make sense of this, you should probably go read the big
	 * block comment at the top of the normal free_pgd_range(),
	 * too.
	 */
	do {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset(tlb->mm, addr);
		if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
			if (pgd_none_or_clear_bad(pgd))
				continue;
			hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
		} else {
#ifdef CONFIG_PPC_FSL_BOOK3E
			/*
			 * Increment next by the size of the huge mapping since
			 * there may be more than one entry at the pgd level
			 * for a single hugepage, but all of them point to the
			 * same kmem cache that holds the hugepte.
			 */
			next = addr + (1 << hugepd_shift(*(hugepd_t *)pgd));
#endif
			free_hugepd_range(tlb, (hugepd_t *)pgd, PGDIR_SHIFT,
					  addr, next, floor, ceiling);
		}
	} while (addr = next, addr != end);
}
/*
 * We are holding mmap_sem, so a parallel huge page collapse cannot run.
 * To prevent hugepage split, disable irq.
 */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	bool is_thp;
	pte_t *ptep, pte;
	unsigned shift;
	unsigned long mask, flags;
	struct page *page = ERR_PTR(-EINVAL);

	local_irq_save(flags);

	ptep = find_linux_pte_or_hugepte(mm->pgd, address, &is_thp, &shift);
	if (!ptep)
		goto no_page;
	pte = READ_ONCE(*ptep);
	/*
	 * Verify it is a huge page else bail.
	 * Transparent hugepages are handled by generic code. We can skip them
	 * here.
	 */
	if (!shift || is_thp)
		goto no_page;

	if (!pte_present(pte)) {
		page = NULL;
		goto no_page;
	}
	mask = (1UL << shift) - 1;
	page = pte_page(pte);
	if (page)
		page += (address & mask) / PAGE_SIZE;

no_page:
	local_irq_restore(flags);
	return page;
}

struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address,
		pmd_t *pmd, int write)
{
	BUG();
	return NULL;
}

struct page *
follow_huge_pud(struct mm_struct *mm, unsigned long address,
		pud_t *pud, int write)
{
	BUG();
	return NULL;
}
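
/*
 * Like pmd_addr_end() and friends: return the end of the current huge page
 * or 'end', whichever comes first.
 */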
static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
				      unsigned long sz)
{
	unsigned long __boundary = (addr + sz) & ~(sz-1);
	return (__boundary - 1 < end - 1) ? __boundary : end;
}
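
/*
 * Fast-GUP helper: walk every hugepte mapped below this hugepd for the given
 * range, handing each one to gup_hugepte().  Returns 0 if any pte could not
 * be grabbed, 1 otherwise.
 */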
int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
		unsigned long end, int write, struct page **pages, int *nr)
{
	pte_t *ptep;
	unsigned long sz = 1UL << hugepd_shift(hugepd);
	unsigned long next;

	ptep = hugepte_offset(hugepd, addr, pdshift);
	do {
		next = hugepte_addr_end(addr, end, sz);
		if (!gup_hugepte(ptep, sz, addr, end, write, pages, nr))
			return 0;
	} while (ptep++, addr = next, addr != end);

	return 1;
}

#ifdef CONFIG_PPC_MM_SLICES
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags)
{
	struct hstate *hstate = hstate_file(file);
	int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));

	if (radix_enabled())
		return radix__hugetlb_get_unmapped_area(file, addr, len,
							pgoff, flags);
	return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
}
#endif
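
/*
 * Return the page size used to map a VMA: the slice page size when slices
 * are in use (hash MMU), otherwise the hstate size for hugetlb VMAs or
 * PAGE_SIZE for everything else.
 */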
unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
#ifdef CONFIG_PPC_MM_SLICES
	unsigned int psize = get_slice_psize(vma->vm_mm, vma->vm_start);
	/* With radix we don't use slices, so derive it from the vma */
	if (!radix_enabled())
		return 1UL << mmu_psize_to_shift(psize);
#endif
	if (!is_vm_hugetlb_page(vma))
		return PAGE_SIZE;

	return huge_page_size(hstate_vma(vma));
}

static inline bool is_power_of_4(unsigned long x)
{
	if (is_power_of_2(x))
		return (__ilog2(x) % 2) ? false : true;
	return false;
}
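
/*
 * Validate a huge page size (from the command line or the MMU page-size
 * table) and register an hstate for it if the hardware supports it.
 */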
static int __init add_huge_page_size(unsigned long long size)
{
	int shift = __ffs(size);
	int mmu_psize;

	/* Check that it is a page size supported by the hardware and
	 * that it fits within pagetable and slice limits. */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if ((size < PAGE_SIZE) || !is_power_of_4(size))
		return -EINVAL;
#else
	if (!is_power_of_2(size)
	    || (shift > SLICE_HIGH_SHIFT) || (shift <= PAGE_SHIFT))
		return -EINVAL;
#endif

	if ((mmu_psize = shift_to_mmu_psize(shift)) < 0)
		return -EINVAL;

	BUG_ON(mmu_psize_defs[mmu_psize].shift != shift);

	/* Return if huge page size has already been setup */
	if (size_to_hstate(size))
		return 0;

	hugetlb_add_hstate(shift - PAGE_SHIFT);

	return 0;
}

static int __init hugepage_setup_sz(char *str)
{
	unsigned long long size;

	size = memparse(str, &str);

	if (add_huge_page_size(size) != 0) {
		hugetlb_bad_size();
		pr_err("Invalid huge page size specified(%llu)\n", size);
	}

	return 1;
}
__setup("hugepagesz=", hugepage_setup_sz);
#ifdef CONFIG_PPC_FSL_BOOK3E
struct kmem_cache *hugepte_cache;
static int __init hugetlbpage_init(void)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		/* Don't treat normal page sizes as huge... */
		if (shift != PAGE_SHIFT)
			if (add_huge_page_size(1ULL << shift) < 0)
				continue;
	}

	/*
	 * Create a kmem cache for hugeptes.  The bottom bits in the pte have
	 * size information encoded in them, so align them to allow this
	 */
	hugepte_cache = kmem_cache_create("hugepte-cache", sizeof(pte_t),
					  HUGEPD_SHIFT_MASK + 1, 0, NULL);
	if (hugepte_cache == NULL)
		panic("%s: Unable to create kmem cache for hugeptes\n",
		      __func__);

	/* Default hpage size = 4M */
	if (mmu_psize_defs[MMU_PAGE_4M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_4M].shift;
	else
		panic("%s: Unable to set default huge page size\n", __func__);

	return 0;
}
#else
static int __init hugetlbpage_init(void)
{
	int psize;

	if (!radix_enabled() && !mmu_has_feature(MMU_FTR_16M_PAGE))
		return -ENODEV;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		unsigned shift;
		unsigned pdshift;

		if (!mmu_psize_defs[psize].shift)
			continue;

		shift = mmu_psize_to_shift(psize);

		if (add_huge_page_size(1ULL << shift) < 0)
			continue;

		if (shift < PMD_SHIFT)
			pdshift = PMD_SHIFT;
		else if (shift < PUD_SHIFT)
			pdshift = PUD_SHIFT;
		else
			pdshift = PGDIR_SHIFT;
		/*
		 * If pdshift and shift are the same, we don't use the
		 * pgt cache for the hugepd.
		 */
		if (pdshift != shift) {
			pgtable_cache_add(pdshift - shift, NULL);
			if (!PGT_CACHE(pdshift - shift))
				panic("hugetlbpage_init(): could not create "
				      "pgtable cache for %d bit pagesize\n", shift);
		}
	}

	/* Set default large page size. Currently, we pick 16M or 1M
	 * depending on what is available
	 */
	if (mmu_psize_defs[MMU_PAGE_16M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_16M].shift;
	else if (mmu_psize_defs[MMU_PAGE_1M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_1M].shift;
	else if (mmu_psize_defs[MMU_PAGE_2M].shift)
		HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;

	return 0;
}
#endif
arch_initcall(hugetlbpage_init);
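
/*
 * Flush the data and instruction caches for every sub-page of a compound
 * huge page, using kmap_atomic() for pages that may live in highmem.
 */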
void flush_dcache_icache_hugepage(struct page *page)
{
	int i;
	void *start;

	BUG_ON(!PageCompound(page));

	for (i = 0; i < (1UL << compound_order(page)); i++) {
		if (!PageHighMem(page)) {
			__flush_dcache_icache(page_address(page+i));
		} else {
			start = kmap_atomic(page+i);
			__flush_dcache_icache(start);
			kunmap_atomic(start);
		}
	}
}

#endif /* CONFIG_HUGETLB_PAGE */
/*
 * We have 4 cases for pgds and pmds:
 * (1) invalid (all zeroes)
 * (2) pointer to next table, as normal; bottom 6 bits == 0
 * (3) leaf pte for huge page, _PAGE_PTE set
 * (4) hugepd pointer, _PAGE_PTE = 0 and bits [2..6] indicate size of table
 *
 * So long as we atomically load page table pointers we are safe against
 * teardown, and we can follow the address down to the page and take a ref
 * on it.  This function needs to be called with interrupts disabled.  We
 * use this variant when we have MSR[EE] = 0 but paca->soft_enabled = 1.
 */
pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
				   bool *is_thp, unsigned *shift)
{
	pgd_t pgd, *pgdp;
	pud_t pud, *pudp;
	pmd_t pmd, *pmdp;
	pte_t *ret_pte;
	hugepd_t *hpdp = NULL;
	unsigned pdshift = PGDIR_SHIFT;

	if (shift)
		*shift = 0;

	if (is_thp)
		*is_thp = false;

	pgdp = pgdir + pgd_index(ea);
	pgd  = READ_ONCE(*pgdp);
	/*
	 * Always operate on the local stack value.  This makes sure the
	 * value doesn't get updated by a parallel THP split/collapse,
	 * page fault or a page unmap.  The returned pte_t * is still not
	 * stable, so it should be checked there for the above conditions.
	 */
	if (pgd_none(pgd))
		return NULL;
	else if (pgd_huge(pgd)) {
		ret_pte = (pte_t *) pgdp;
		goto out;
	} else if (is_hugepd(__hugepd(pgd_val(pgd))))
		hpdp = (hugepd_t *)&pgd;
	else {
		/*
		 * Even if we end up with an unmap, the pgtable will not
		 * be freed, because we do an rcu free and here we are
		 * irq disabled
		 */
		pdshift = PUD_SHIFT;
		pudp = pud_offset(&pgd, ea);
		pud  = READ_ONCE(*pudp);

		if (pud_none(pud))
			return NULL;
		else if (pud_huge(pud)) {
			ret_pte = (pte_t *) pudp;
			goto out;
		} else if (is_hugepd(__hugepd(pud_val(pud))))
			hpdp = (hugepd_t *)&pud;
		else {
			pdshift = PMD_SHIFT;
			pmdp = pmd_offset(&pud, ea);
			pmd  = READ_ONCE(*pmdp);
			/*
			 * A hugepage collapse is captured by pmd_none, because
			 * it marks the pmd none and does an hpte invalidate.
			 */
			if (pmd_none(pmd))
				return NULL;

			if (pmd_trans_huge(pmd)) {
				if (is_thp)
					*is_thp = true;
				ret_pte = (pte_t *) pmdp;
				goto out;
			}

			if (pmd_huge(pmd)) {
				ret_pte = (pte_t *) pmdp;
				goto out;
			} else if (is_hugepd(__hugepd(pmd_val(pmd))))
				hpdp = (hugepd_t *)&pmd;
			else
				return pte_offset_kernel(&pmd, ea);
		}
	}
	if (!hpdp)
		return NULL;

	ret_pte = hugepte_offset(*hpdp, ea, pdshift);
	pdshift = hugepd_shift(*hpdp);
out:
	if (shift)
		*shift = pdshift;
	return ret_pte;
}
EXPORT_SYMBOL_GPL(__find_linux_pte_or_hugepte);
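
/*
 * Lockless fast-GUP for one huge pte: take speculative references on the
 * head page covering every sub-page in the range, then re-check that the
 * pte has not changed underneath us and back the references out if it has.
 */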
int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	unsigned long pte_end;
	struct page *head, *page;
	pte_t pte;
	int refs;

	pte_end = (addr + sz) & ~(sz-1);
	if (pte_end < end)
		end = pte_end;

	pte = READ_ONCE(*ptep);
	mask = _PAGE_PRESENT | _PAGE_READ;
	if (write)
		mask |= _PAGE_WRITE;

	if ((pte_val(pte) & mask) != mask)
		return 0;

	/* hugepages are never "special" */
	VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

	refs = 0;
	head = pte_page(pte);

	page = head + ((addr & (sz-1)) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pte_val(pte) != pte_val(*ptep))) {
		/* Could be optimized better */
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}