/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	/* index of the 4K subpage within the huge page */
	page = &pte_page(*pte)[(address >> PAGE_SHIFT) % (HPAGE_SIZE / PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}
#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is, a normal
 * hugetlb entry or a non-present (migration or hwpoisoned) hugetlb entry.
 * Otherwise, returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}
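
/*
 * pud_huge() returns 1 if @pud maps a (1GB) huge page, i.e. the PSE bit
 * is set. Otherwise, returns 0.
 */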
int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
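/*
 * Bottom-up search for a free, huge-page-aligned region: scan upward from
 * the legacy mmap base toward the 32-bit or 64-bit task size limit,
 * depending on whether this is a compat syscall.
 */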
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);
	info.high_limit = in_compat_syscall() ?
		tasksize_32bit() : tasksize_64bit();
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
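
/*
 * Top-down search: walk down from the process's mmap base toward PAGE_SIZE
 * looking for a huge-page-aligned hole, with a bottom-up fallback on
 * failure (see the comment in the body).
 */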
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
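
/*
 * Arch hook for mmap() of hugetlbfs files: validate the length, honor
 * MAP_FIXED and address hints, then dispatch to the bottom-up or top-down
 * search according to the mm's layout.
 */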
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
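/*
 * Parse the "hugepagesz=" boot parameter, e.g. "hugepagesz=2M" or
 * "hugepagesz=1G". 2MB (PMD) pages are always accepted; 1GB (PUD) pages
 * require CPU support for gigantic pages (X86_FEATURE_GBPAGES).
 */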
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);

	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
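/*
 * Register the 1GB hstate even when "hugepagesz=1G" was not given on the
 * command line, so that gigantic pages can still be allocated at runtime.
 */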
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif /* (CONFIG_MEMORY_ISOLATION && CONFIG_COMPACTION) || CONFIG_CMA */
#endif /* CONFIG_X86_64 */