page_vma_mapped.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
        pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
        if (!(pvmw->flags & PVMW_SYNC)) {
                if (pvmw->flags & PVMW_MIGRATION) {
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
                        if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);
        return true;
}

static inline bool pfn_in_hpage(struct page *hpage, unsigned long pfn)
{
        unsigned long hpage_pfn = page_to_pfn(hpage);

        /* THP can be referenced by any subpage */
        return pfn >= hpage_pfn && pfn - hpage_pfn < hpage_nr_pages(hpage);
}
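
/*
 * Worked example (illustrative, assuming x86-64 with 2MB THPs): if the head
 * page of a THP has pfn 0x40000, hpage_nr_pages() is 512 (0x200), so any pfn
 * in [0x40000, 0x40200) (the head page and its 511 tail pages) is treated as
 * part of the huge page by pfn_in_hpage().
 */
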
/**
 * check_pte - check if @pvmw->page is mapped at the @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any subpage in case of THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points to
 * @pvmw->page, or to any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        unsigned long pfn;

        if (pvmw->flags & PVMW_MIGRATION) {
                swp_entry_t entry;

                if (!is_swap_pte(*pvmw->pte))
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);

                if (!is_migration_entry(entry))
                        return false;

                pfn = migration_entry_to_pfn(entry);
        } else if (is_swap_pte(*pvmw->pte)) {
                swp_entry_t entry;

                /* Handle un-addressable ZONE_DEVICE memory */
                entry = pte_to_swp_entry(*pvmw->pte);
                if (!is_device_private_entry(entry))
                        return false;

                pfn = device_private_entry_to_pfn(entry);
        } else {
                if (!pte_present(*pvmw->pte))
                        return false;

                pfn = pte_pfn(*pvmw->pte);
        }

        return pfn_in_hpage(pvmw->page, pfn);
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped page
 * (usually a THP). For a PTE-mapped THP, you should run page_vma_mapped_walk()
 * in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() has returned
 * false, use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct mm_struct *mm = pvmw->vma->vm_mm;
        struct page *page = pvmw->page;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t pmde;

        /* The only possible pmd mapping has been handled on last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (pvmw->pte)
                goto next_pte;

        if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address,
                                            PAGE_SIZE << compound_order(page));
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }
restart:
        pgd = pgd_offset(mm, pvmw->address);
        if (!pgd_present(*pgd))
                return false;
        p4d = p4d_offset(pgd, pvmw->address);
        if (!p4d_present(*p4d))
                return false;
        pud = pud_offset(p4d, pvmw->address);
        if (!pud_present(*pud))
                return false;
        pvmw->pmd = pmd_offset(pud, pvmw->address);
        /*
         * Make sure the pmd value isn't cached in a register by the
         * compiler and used as a stale value after we've observed a
         * subsequent update.
         */
        pmde = READ_ONCE(*pvmw->pmd);
        if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
                pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                if (likely(pmd_trans_huge(*pvmw->pmd))) {
                        if (pvmw->flags & PVMW_MIGRATION)
                                return not_found(pvmw);
                        if (pmd_page(*pvmw->pmd) != page)
                                return not_found(pvmw);
                        return true;
                } else if (!pmd_present(*pvmw->pmd)) {
                        if (thp_migration_supported()) {
                                if (!(pvmw->flags & PVMW_MIGRATION))
                                        return not_found(pvmw);
                                if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
                                        swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);

                                        if (migration_entry_to_page(entry) != page)
                                                return not_found(pvmw);
                                        return true;
                                }
                        }
                        return not_found(pvmw);
                } else {
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                }
        } else if (!pmd_present(pmde)) {
                return false;
        }

        if (!map_pte(pvmw))
                goto next_pte;
        while (1) {
                if (check_pte(pvmw))
                        return true;
next_pte:
                /* Seek to next pte only makes sense for THP */
                if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
                        return not_found(pvmw);
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= pvmw->vma->vm_end ||
                            pvmw->address >=
                                        __vma_address(pvmw->page, pvmw->vma) +
                                        hpage_nr_pages(pvmw->page) * PAGE_SIZE)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if (pvmw->address % PMD_SIZE == 0) {
                                pte_unmap(pvmw->pte);
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                goto restart;
                        } else {
                                pvmw->pte++;
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
        }
}
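
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * typical caller pattern for page_vma_mapped_walk(), modelled on the rmap
 * walkers in mm/rmap.c. The caller is assumed to already hold whatever locks
 * keep @page stable in @vma for the duration of the walk.
 */
static unsigned int __maybe_unused count_page_mappings(struct page *page,
                                                       struct vm_area_struct *vma,
                                                       unsigned long address)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .address = address,
                .flags = 0,
        };
        unsigned int nr = 0;

        /*
         * Each successful iteration returns with pvmw.ptl locked; the next
         * call drops it and moves to the next entry. pvmw.pte == NULL with
         * pvmw.pmd set means the whole THP is mapped by a single PMD entry.
         *
         * A caller that wants to stop early would call
         * page_vma_mapped_walk_done(&pvmw) and break out of the loop.
         */
        while (page_vma_mapped_walk(&pvmw))
                nr++;

        return nr;
}
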
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .flags = PVMW_SYNC,
        };
        unsigned long start, end;

        start = __vma_address(page, vma);
        end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);

        if (unlikely(end < vma->vm_start || start >= vma->vm_end))
                return 0;
        pvmw.address = max(start, vma->vm_start);
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}
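
/*
 * Illustrative sketch (hypothetical caller, not part of this file): one way
 * page_mapped_in_vma() might be used. Because the walk is started with
 * PVMW_SYNC, map_pte() takes the PTE lock instead of skipping non-present
 * entries, so the answer does not race with a PTE being installed under the
 * same lock; the caller must still keep @page and @vma themselves stable.
 */
static void __maybe_unused warn_if_unmapped(struct page *page,
                                            struct vm_area_struct *vma)
{
        if (!page_mapped_in_vma(page, vma))
                pr_warn("pfn %#lx is not mapped in vma %p\n",
                        page_to_pfn(page), vma);
}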