page_vma_mapped.c

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
{
	pmd_t pmde;

	/*
	 * Make sure we don't re-load pmd between present and !trans_huge check.
	 * We need a consistent view.
	 */
	pmde = READ_ONCE(*pvmw->pmd);
	return pmd_present(pmde) && !pmd_trans_huge(pmde);
}

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			if (!pte_present(*pvmw->pte))
				return false;
		}
	}
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}
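
/*
 * Note on the flags handled above and in check_pte() below: PVMW_SYNC makes
 * map_pte() take the PTE lock unconditionally instead of bailing out early on
 * an uninteresting entry, and PVMW_MIGRATION makes the walk match migration
 * swap entries rather than present PTEs.
 */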
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	if (pvmw->flags & PVMW_MIGRATION) {
#ifdef CONFIG_MIGRATION
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_migration_entry(entry))
			return false;
		if (migration_entry_to_page(entry) - pvmw->page >=
				hpage_nr_pages(pvmw->page)) {
			return false;
		}
		if (migration_entry_to_page(entry) < pvmw->page)
			return false;
#else
		WARN_ON_ONCE(1);
#endif
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		/* THP can be referenced by any subpage */
		if (pte_page(*pvmw->pte) - pvmw->page >=
				hpage_nr_pages(pvmw->page)) {
			return false;
		}
		if (pte_page(*pvmw->pte) < pvmw->page)
			return false;
	}

	return true;
}
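
/*
 * Illustrative sketch, not part of this file: the two comparisons against
 * pvmw->page in check_pte() are equivalent to asking whether the mapped
 * (sub)page lies in the compound page's range, i.e. in
 * [pvmw->page, pvmw->page + hpage_nr_pages(pvmw->page)). The helper below is
 * hypothetical and only expresses that same test in one place.
 */
static inline bool page_within_hpage(struct page *page, struct page *hpage)
{
	return page >= hpage && page - hpage < hpage_nr_pages(hpage);
}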
/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually THP). For a PTE-mapped THP, you should run page_vma_mapped_walk()
 * in a loop to find all PTEs that map the THP (see the sketch after this
 * function).
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returned false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;

	/* The only possible pmd mapping has been handled on last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

	if (pvmw->pte)
		goto next_pte;

	if (unlikely(PageHuge(pvmw->page))) {
		/* when pud is not present, pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}
restart:
	pgd = pgd_offset(mm, pvmw->address);
	if (!pgd_present(*pgd))
		return false;
	p4d = p4d_offset(pgd, pvmw->address);
	if (!p4d_present(*p4d))
		return false;
	pud = pud_offset(p4d, pvmw->address);
	if (!pud_present(*pud))
		return false;
	pvmw->pmd = pmd_offset(pud, pvmw->address);
	if (pmd_trans_huge(*pvmw->pmd)) {
		pvmw->ptl = pmd_lock(mm, pvmw->pmd);
		if (!pmd_present(*pvmw->pmd))
			return not_found(pvmw);
		if (likely(pmd_trans_huge(*pvmw->pmd))) {
			if (pvmw->flags & PVMW_MIGRATION)
				return not_found(pvmw);
			if (pmd_page(*pvmw->pmd) != page)
				return not_found(pvmw);
			return true;
		} else {
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		}
	} else {
		if (!check_pmd(pvmw))
			return false;
	}
	if (!map_pte(pvmw))
		goto next_pte;
	while (1) {
		if (check_pte(pvmw))
			return true;
next_pte:
		/* Seek to next pte only makes sense for THP */
		if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
			return not_found(pvmw);
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= pvmw->vma->vm_end ||
			    pvmw->address >=
					__vma_address(pvmw->page, pvmw->vma) +
					hpage_nr_pages(pvmw->page) * PAGE_SIZE)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if (pvmw->address % PMD_SIZE == 0) {
				pte_unmap(pvmw->pte);
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				goto restart;
			} else {
				pvmw->pte++;
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
	}
}
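
/*
 * Illustrative sketch, not part of this file: a typical caller (such as an
 * rmap walker) drives page_vma_mapped_walk() in a loop, as described in the
 * kernel-doc above, so that every PTE mapping a PTE-mapped THP is visited.
 * The function name and its counting semantics are hypothetical.
 */
static int __maybe_unused count_page_mappings_in_vma(struct page *page,
		struct vm_area_struct *vma, unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
	};
	int nr = 0;

	while (page_vma_mapped_walk(&pvmw)) {
		/*
		 * pvmw.ptl is held here. pvmw.pte is valid for a PTE
		 * mapping; for a PMD-mapped THP, pvmw.pmd is set and
		 * pvmw.pte is NULL.
		 */
		nr++;
	}
	/* When the walk returns false it has already dropped the lock. */
	return nr;
}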
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};
	unsigned long start, end;

	start = __vma_address(page, vma);
	end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

	if (unlikely(end < vma->vm_start || start >= vma->vm_end))
		return 0;
	pvmw.address = max(start, vma->vm_start);
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}
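
/*
 * Illustrative sketch, not part of this file: page_mapped_in_vma() walks with
 * PVMW_SYNC set, takes and drops the page table lock itself (via
 * page_vma_mapped_walk_done() above on success), and hands back a plain 0/1
 * answer. The caller below is hypothetical and only shows the usage.
 */
static void __maybe_unused example_report_mapping(struct page *page,
						  struct vm_area_struct *vma)
{
	if (page_mapped_in_vma(page, vma))
		pr_info("pfn %lx is mapped in this vma\n", page_to_pfn(page));
}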