gup.c

/*
 * Lockless get_user_pages_fast for x86
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/memremap.h>

#include <asm/mmu_context.h>
#include <asm/pgtable.h>

static inline pte_t gup_get_pte(pte_t *ptep)
{
#ifndef CONFIG_X86_PAE
	return READ_ONCE(*ptep);
#else
	/*
	 * With get_user_pages_fast, we walk down the pagetables without taking
	 * any locks. For this we would like to load the pointers atomically,
	 * but that is not possible (without expensive cmpxchg8b) on PAE. What
	 * we do have is the guarantee that a pte will only either go from not
	 * present to present, or present to not present or both -- it will not
	 * switch to a completely different present page without a TLB flush in
	 * between; something that we are blocking by holding interrupts off.
	 *
	 * Setting ptes from not present to present goes:
	 * ptep->pte_high = h;
	 * smp_wmb();
	 * ptep->pte_low = l;
	 *
	 * And present to not present goes:
	 * ptep->pte_low = 0;
	 * smp_wmb();
	 * ptep->pte_high = 0;
	 *
	 * We must ensure here that the load of pte_low sees l iff pte_high
	 * sees h. We load pte_high *after* loading pte_low, which ensures we
	 * don't see an older value of pte_high. *Then* we recheck pte_low,
	 * which ensures that we haven't picked up a changed pte high. We might
	 * have got rubbish values from pte_low and pte_high, but we are
	 * guaranteed that pte_low will not have the present bit set *unless*
	 * it is 'l'. And get_user_pages_fast only operates on present ptes, so
	 * we're safe.
	 *
	 * gup_get_pte should not be used or copied outside gup.c without being
	 * very careful -- it does not atomically load the pte or anything that
	 * is likely to be useful for you.
	 */
	pte_t pte;

retry:
	pte.pte_low = ptep->pte_low;
	smp_rmb();
	pte.pte_high = ptep->pte_high;
	smp_rmb();
	if (unlikely(pte.pte_low != ptep->pte_low))
		goto retry;

	return pte;
#endif
}

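/*
 * Undo the page references taken since nr_start: used when a
 * get_dev_pagemap() lookup fails partway through a range, so the
 * pages already recorded in @pages are released again.
 */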
static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
	while ((*nr) - nr_start) {
		struct page *page = pages[--(*nr)];

		ClearPageReferenced(page);
		put_page(page);
	}
}

/*
 * 'pteval' can come from a pte, pmd or pud. We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline int pte_allows_gup(unsigned long pteval, int write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	/* Check memory protection keys permissions. */
	if (!__pkru_allows_pkey(pte_flags_pkey(pteval), write))
		return 0;

	return 1;
}

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	int nr_start = *nr;
	pte_t *ptep;

	ptep = pte_offset_map(&pmd, addr);
	do {
		pte_t pte = gup_get_pte(ptep);
		struct page *page;

		/* Similar to the PMD case, NUMA hinting must take slow path */
		if (pte_protnone(pte)) {
			pte_unmap(ptep);
			return 0;
		}

		if (pte_devmap(pte)) {
			pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
			if (unlikely(!pgmap)) {
				undo_dev_pagemap(nr, nr_start, pages);
				pte_unmap(ptep);
				return 0;
			}
		} else if (!pte_allows_gup(pte_val(pte), write) ||
			   pte_special(pte)) {
			pte_unmap(ptep);
			return 0;
		}

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		get_page(page);
		put_dev_pagemap(pgmap);
		SetPageReferenced(page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);
	pte_unmap(ptep - 1);

	return 1;
}

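/*
 * Take @nr references on a compound page's head in one refcount update,
 * instead of one get_page() per tail page recorded by the huge-page
 * walkers below.
 */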
static inline void get_head_page_multiple(struct page *page, int nr)
{
	VM_BUG_ON_PAGE(page != compound_head(page), page);
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, nr);
	SetPageReferenced(page);
}

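/*
 * Huge devmap PMDs map ZONE_DEVICE memory, which is not managed as a
 * compound page: take a reference on each PAGE_SIZE subpage, looking up
 * (and then dropping) the dev_pagemap around every get_page(), and back
 * out via undo_dev_pagemap() if the lookup fails.
 */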
static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	int nr_start = *nr;
	unsigned long pfn = pmd_pfn(pmd);
	struct dev_pagemap *pgmap = NULL;

	pfn += (addr & ~PMD_MASK) >> PAGE_SHIFT;
	do {
		struct page *page = pfn_to_page(pfn);

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			undo_dev_pagemap(nr, nr_start, pages);
			return 0;
		}
		SetPageReferenced(page);
		pages[*nr] = page;
		get_page(page);
		put_dev_pagemap(pgmap);
		(*nr)++;
		pfn++;
	} while (addr += PAGE_SIZE, addr != end);
	return 1;
}

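/*
 * Record every PAGE_SIZE subpage of a huge PMD mapping, then pin the
 * whole mapping with a single refcount update on the head page.
 */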
static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pte_allows_gup(pmd_val(pmd), write))
		return 0;

	VM_BUG_ON(!pfn_valid(pmd_pfn(pmd)));
	if (pmd_devmap(pmd))
		return __gup_device_huge_pmd(pmd, addr, end, pages, nr);

	/* hugepages are never "special" */
	VM_BUG_ON(pmd_flags(pmd) & _PAGE_SPECIAL);

	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);
	get_head_page_multiple(head, refs);

	return 1;
}

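/*
 * Walk the PMD entries covering [addr, end): huge or non-present PMDs
 * are handled (or rejected) as a unit, everything else descends into
 * gup_pte_range(). Returns 0 as soon as the slow path is required.
 */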
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = *pmdp;

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_large(pmd) || !pmd_present(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;
			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
				return 0;
		} else {
			if (!gup_pte_range(pmd, addr, next, write, pages, nr))
				return 0;
		}
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

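/*
 * Like gup_huge_pmd() above, but for a huge PUD mapping (1GB pages on
 * x86-64). Note there is no devmap handling on this path.
 */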
static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	int refs;

	if (!pte_allows_gup(pud_val(pud), write))
		return 0;
	/* hugepages are never "special" */
	VM_BUG_ON(pud_flags(pud) & _PAGE_SPECIAL);
	VM_BUG_ON(!pfn_valid(pud_pfn(pud)));

	refs = 0;
	head = pud_page(pud);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);
	get_head_page_multiple(head, refs);

	return 1;
}

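/*
 * Walk the PUD entries covering [addr, end), descending into
 * gup_pmd_range() unless the entry is a huge (1GB) mapping.
 */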
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = *pudp;

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_large(pud))) {
			if (!gup_huge_pud(pud, addr, next, write, pages, nr))
				return 0;
		} else {
			if (!gup_pmd_range(pud, addr, next, write, pages, nr))
				return 0;
		}
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	unsigned long flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					(void __user *)start, len)))
		return 0;

	/*
	 * XXX: batch / limit 'nr', to avoid large irq off latency
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch size
	 * will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main guy that batches up lots of get_user_pages, and even
	 * they are limited to 64-at-a-time which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables and pages from being freed on x86.
	 *
	 * So long as we atomically load page table pointers versus teardown
	 * (which we do on x86, with the above PAE exception), we can follow the
	 * address down to the page and take a ref on it.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;

	end = start + len;
	if (end < start)
		goto slow_irqon;

#ifdef CONFIG_X86_64
	if (end >> __VIRTUAL_MASK_SHIFT)
		goto slow_irqon;
#endif

	/*
	 * XXX: batch / limit 'nr', to avoid large irq off latency
	 * needs some instrumenting to determine the common sizes used by
	 * important workloads (eg. DB2), and whether limiting the batch size
	 * will decrease performance.
	 *
	 * It seems like we're in the clear for the moment. Direct-IO is
	 * the main guy that batches up lots of get_user_pages, and even
	 * they are limited to 64-at-a-time which is not so many.
	 */
	/*
	 * This doesn't prevent pagetable teardown, but does prevent
	 * the pagetables and pages from being freed on x86.
	 *
	 * So long as we atomically load page table pointers versus teardown
	 * (which we do on x86, with the above PAE exception), we can follow the
	 * address down to the page and take a ref on it.
	 */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = *pgdp;

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;

slow:
		local_irq_enable();
slow_irqon:
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		ret = get_user_pages_unlocked(start,
					      (end - start) >> PAGE_SHIFT,
					      pages, write ? FOLL_WRITE : 0);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}