/* pagewalk.c */
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>

static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pte_t *pte;
        int err = 0;

        pte = pte_offset_map(pmd, addr);
        for (;;) {
                err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
                if (err)
                        break;
                addr += PAGE_SIZE;
                if (addr == end)
                        break;
                pte++;
        }

        pte_unmap(pte);
        return err;
}
static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pmd_t *pmd;
        unsigned long next;
        int err = 0;

        pmd = pmd_offset(pud, addr);
        do {
again:
                next = pmd_addr_end(addr, end);
                if (pmd_none(*pmd) || !walk->vma) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                /*
                 * This implies that each ->pmd_entry() handler needs to
                 * know about pmd_trans_huge() pmds; see the example
                 * sketch after this function.
                 */
                if (walk->pmd_entry)
                        err = walk->pmd_entry(pmd, addr, next, walk);
                if (err)
                        break;
                /*
                 * Check this here so we only break down trans_huge
                 * pages when we _need_ to.
                 */
                if (!walk->pte_entry)
                        continue;

                split_huge_pmd(walk->vma, pmd, addr);
                if (pmd_trans_unstable(pmd))
                        goto again;
                err = walk_pte_range(pmd, addr, next, walk);
                if (err)
                        break;
        } while (pmd++, addr = next, addr != end);

        return err;
}
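
/*
 * Illustrative sketch (not part of the original file): because
 * walk_pmd_range() calls ->pmd_entry() before any huge pmd is split, a
 * pmd_entry callback has to recognize transparent huge pmds itself.
 * The callback name and the walk->private usage below are hypothetical.
 */
#if 0
static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
                             unsigned long end, struct mm_walk *walk)
{
        unsigned long *thp_count = walk->private;
        spinlock_t *ptl = pmd_trans_huge_lock(pmd, walk->vma);

        if (ptl) {
                /* Huge pmd: account for it and drop the lock. */
                (*thp_count)++;
                spin_unlock(ptl);
        }
        /* Returning 0 lets the walk continue to ->pte_entry() if set. */
        return 0;
}
#endif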
static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pud_t *pud;
        unsigned long next;
        int err = 0;

        pud = pud_offset(p4d, addr);
        do {
again:
                next = pud_addr_end(addr, end);
                if (pud_none(*pud) || !walk->vma) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }

                if (walk->pud_entry) {
                        spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);

                        if (ptl) {
                                err = walk->pud_entry(pud, addr, next, walk);
                                spin_unlock(ptl);
                                if (err)
                                        break;
                                continue;
                        }
                }

                split_huge_pud(walk->vma, pud, addr);
                if (pud_none(*pud))
                        goto again;

                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pmd_range(pud, addr, next, walk);
                if (err)
                        break;
        } while (pud++, addr = next, addr != end);

        return err;
}
static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        p4d_t *p4d;
        unsigned long next;
        int err = 0;

        p4d = p4d_offset(pgd, addr);
        do {
                next = p4d_addr_end(addr, end);
                if (p4d_none_or_clear_bad(p4d)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_pud_range(p4d, addr, next, walk);
                if (err)
                        break;
        } while (p4d++, addr = next, addr != end);

        return err;
}
static int walk_pgd_range(unsigned long addr, unsigned long end,
                          struct mm_walk *walk)
{
        pgd_t *pgd;
        unsigned long next;
        int err = 0;

        pgd = pgd_offset(walk->mm, addr);
        do {
                next = pgd_addr_end(addr, end);
                if (pgd_none_or_clear_bad(pgd)) {
                        if (walk->pte_hole)
                                err = walk->pte_hole(addr, next, walk);
                        if (err)
                                break;
                        continue;
                }
                if (walk->pmd_entry || walk->pte_entry)
                        err = walk_p4d_range(pgd, addr, next, walk);
                if (err)
                        break;
        } while (pgd++, addr = next, addr != end);

        return err;
}
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_entry_end(struct hstate *h, unsigned long addr,
                                       unsigned long end)
{
        unsigned long boundary = (addr & huge_page_mask(h)) + huge_page_size(h);
        return boundary < end ? boundary : end;
}

static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;
        struct hstate *h = hstate_vma(vma);
        unsigned long next;
        unsigned long hmask = huge_page_mask(h);
        unsigned long sz = huge_page_size(h);
        pte_t *pte;
        int err = 0;

        do {
                next = hugetlb_entry_end(h, addr, end);
                pte = huge_pte_offset(walk->mm, addr & hmask, sz);
                if (pte && walk->hugetlb_entry)
                        err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
                if (err)
                        break;
        } while (addr = next, addr != end);

        return err;
}

#else /* CONFIG_HUGETLB_PAGE */
static int walk_hugetlb_range(unsigned long addr, unsigned long end,
                              struct mm_walk *walk)
{
        return 0;
}
#endif /* CONFIG_HUGETLB_PAGE */
/*
 * Decide whether we really walk over the current vma on [@start, @end)
 * or skip it via the returned value. Return 0 if we do walk over the
 * current vma, and return 1 if we skip the vma. A negative value means
 * an error, in which case we abort the current walk.
 */
static int walk_page_test(unsigned long start, unsigned long end,
                          struct mm_walk *walk)
{
        struct vm_area_struct *vma = walk->vma;

        if (walk->test_walk)
                return walk->test_walk(start, end, walk);

        /*
         * A vma with VM_PFNMAP has no valid struct pages behind its
         * range, so we don't walk over it as we do for normal vmas.
         * However, some callers are interested in handling hole ranges
         * and don't want to simply ignore any address range. Such users
         * certainly define a ->pte_hole() callback, so let's delegate
         * handling of vma(VM_PFNMAP) to it.
         */
        if (vma->vm_flags & VM_PFNMAP) {
                int err = 1;

                if (walk->pte_hole)
                        err = walk->pte_hole(start, end, walk);
                return err ? err : 1;
        }
        return 0;
}
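
/*
 * Illustrative sketch (not part of the original file): a ->test_walk()
 * callback that skips mlocked vmas instead of walking them. As described
 * above, returning 1 skips the vma, 0 walks it, and a negative value
 * aborts the walk. The callback name is hypothetical.
 */
#if 0
static int example_test_walk(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        if (walk->vma->vm_flags & VM_LOCKED)
                return 1;       /* skip this vma, continue the walk */
        return 0;               /* walk this vma */
}
#endif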
static int __walk_page_range(unsigned long start, unsigned long end,
                             struct mm_walk *walk)
{
        int err = 0;
        struct vm_area_struct *vma = walk->vma;

        if (vma && is_vm_hugetlb_page(vma)) {
                if (walk->hugetlb_entry)
                        err = walk_hugetlb_range(start, end, walk);
        } else
                err = walk_pgd_range(start, end, walk);

        return err;
}
/**
 * walk_page_range - walk page tables with caller-specific callbacks
 *
 * Recursively walk the page table tree of the process represented by
 * @walk->mm within the virtual address range [@start, @end). During
 * walking, we can do caller-specific work for each entry by setting up
 * pmd_entry(), pte_entry(), and/or hugetlb_entry(). If you don't set up
 * one of these callbacks, the associated entries/pages are simply ignored.
 *
 * The return values of these callbacks are commonly defined as follows:
 *
 *  - 0  : succeeded to handle the current entry; if the end address has
 *         not been reached yet, continue the walk.
 *  - >0 : succeeded to handle the current entry; return to the caller
 *         with a caller-specific value.
 *  - <0 : failed to handle the current entry; return to the caller
 *         with the error code.
 *
 * Before starting to walk the page tables, some callers want to check
 * whether they really want to walk over the current vma, typically by
 * checking its vm_flags. walk_page_test() and @walk->test_walk() are
 * used for this purpose.
 *
 * struct mm_walk keeps current values of some common data like vma and
 * pmd, which are useful for access from the callbacks. If you want to
 * pass caller-specific data to the callbacks, @walk->private should be
 * helpful.
 *
 * Locking:
 *   Callers of walk_page_range() and walk_page_vma() should hold
 *   @walk->mm->mmap_sem, because these functions traverse the vma list
 *   and/or access the vma's data.
 */
int walk_page_range(unsigned long start, unsigned long end,
                    struct mm_walk *walk)
{
        int err = 0;
        unsigned long next;
        struct vm_area_struct *vma;

        if (start >= end)
                return -EINVAL;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON_MM(!rwsem_is_locked(&walk->mm->mmap_sem), walk->mm);

        vma = find_vma(walk->mm, start);
        do {
                if (!vma) { /* after the last vma */
                        walk->vma = NULL;
                        next = end;
                } else if (start < vma->vm_start) { /* outside vma */
                        walk->vma = NULL;
                        next = min(end, vma->vm_start);
                } else { /* inside vma */
                        walk->vma = vma;
                        next = min(end, vma->vm_end);
                        vma = vma->vm_next;

                        err = walk_page_test(start, next, walk);
                        if (err > 0) {
                                /*
                                 * Positive return values are purely for
                                 * controlling the pagewalk, so should never
                                 * be passed to the callers.
                                 */
                                err = 0;
                                continue;
                        }
                        if (err < 0)
                                break;
                }
                if (walk->vma || walk->pte_hole)
                        err = __walk_page_range(start, next, walk);
                if (err)
                        break;
        } while (start = next, start < end);

        return err;
}
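
/*
 * Illustrative sketch (not part of the original file): counting the
 * present ptes in a range with walk_page_range(). The function and
 * variable names are hypothetical; mmap_sem must be held as noted in
 * the Locking comment above.
 */
#if 0
static int count_pte_entry(pte_t *pte, unsigned long addr,
                           unsigned long end, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;
}

static unsigned long count_present_ptes(struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry      = count_pte_entry,
                .mm             = mm,
                .private        = &count,
        };

        down_read(&mm->mmap_sem);
        walk_page_range(start, end, &walk);
        up_read(&mm->mmap_sem);
        return count;
}
#endif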
int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk)
{
        int err;

        if (!walk->mm)
                return -EINVAL;

        VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
        VM_BUG_ON(!vma);

        walk->vma = vma;
        err = walk_page_test(vma->vm_start, vma->vm_end, walk);
        if (err > 0)
                return 0;
        if (err < 0)
                return err;
        return __walk_page_range(vma->vm_start, vma->vm_end, walk);
}
  307. }