/*
 * PARISC64 Huge TLB page support.
 *
 * This parisc implementation is heavily based on the SPARC and x86 code.
 *
 * Copyright (C) 2015 Helge Deller <deller@gmx.de>
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
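
/*
 * Find a free region for a huge page mapping.  The length must be a
 * multiple of the huge page size and any hint address is rounded up to
 * a huge page boundary; the actual search is left to
 * arch_get_unmapped_area() so that cache colouring is handled in one
 * place.
 */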
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;

	if (addr)
		addr = ALIGN(addr, huge_page_size(h));

	/* we need to make sure the colouring is OK */
	return arch_get_unmapped_area(file, addr, len, pgoff, flags);
}
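
/*
 * Return the pte for a huge page mapping, walking pgd -> pud -> pmd and
 * allocating any missing intermediate levels on the way down.
 */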
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, addr);
	}
	return pte;
}
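
/*
 * Look up the pte for an existing huge page mapping.  Unlike
 * huge_pte_alloc() this never allocates; it returns NULL if any level
 * of the walk is missing.
 */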
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

/* Purge data and instruction TLB entries.  Must be called holding
 * the pa_tlb_lock.  The TLB purge instructions are slow on SMP
 * machines since the purge must be broadcast to all CPUs.
 */
static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
{
	int i;

	/* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
	 * Linux standard huge pages (e.g. 2 MB) */
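	/* With those example sizes, HPAGE_SHIFT - REAL_HPAGE_SHIFT == 1, so
	 * the loop below runs twice and purges one entry per 1 MB hardware
	 * page backing the 2 MB Linux huge page.  (Illustrative numbers
	 * only; the real shifts depend on the kernel configuration.) */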
	BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);

	addr &= HPAGE_MASK;
	addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;

	for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
		purge_tlb_entries(mm, addr);
		addr += (1UL << REAL_HPAGE_SHIFT);
	}
}
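
/* Fan one huge mapping out into per-base-page ptes: each iteration of
 * the loop writes a pte and advances the physical address in the entry
 * by PAGE_SIZE, after which the stale TLB entries for the whole range
 * are purged. */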
/* __set_huge_pte_at() must be called holding the pa_tlb_lock. */
static void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long addr_start;
	int i;

	addr &= HPAGE_MASK;
	addr_start = addr;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte(ptep, entry);
		ptep++;

		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}

	purge_tlb_entries_huge(mm, addr_start);
}
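
/* Locked wrapper around __set_huge_pte_at(); purge_tlb_start() and
 * purge_tlb_end() take and release the pa_tlb_lock. */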
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned long flags;

	purge_tlb_start(flags);
	__set_huge_pte_at(mm, addr, ptep, entry);
	purge_tlb_end(flags);
}
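
/* Read the old huge pte and clear the mapping (by writing zero ptes)
 * in a single locked section, returning the old value to the caller. */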
pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned long flags;
	pte_t entry;

	purge_tlb_start(flags);
	entry = *ptep;
	__set_huge_pte_at(mm, addr, ptep, __pte(0));
	purge_tlb_end(flags);

	return entry;
}
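
/* Rewrite the huge mapping with the write bit cleared, e.g. when the
 * range must become copy-on-write across fork(). */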
void huge_ptep_set_wrprotect(struct mm_struct *mm,
				unsigned long addr, pte_t *ptep)
{
	unsigned long flags;
	pte_t old_pte;

	purge_tlb_start(flags);
	old_pte = *ptep;
	__set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
	purge_tlb_end(flags);
}
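
/* Update the access/dirty bits of a huge pte if they changed; the
 * return value tells the caller whether an update was performed. */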
int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pte_t pte, int dirty)
{
	unsigned long flags;
	int changed;

	purge_tlb_start(flags);
	changed = !pte_same(*ptep, pte);
	if (changed) {
		__set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	purge_tlb_end(flags);

	return changed;
}
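
/* parisc represents a huge page as a run of ordinary ptes rather than
 * as a leaf pmd or pud entry, so neither level ever holds a huge
 * mapping. */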
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}