gup.c

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/spinlock.h>

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>

#include "internal.h"

static struct page *no_page_table(struct vm_area_struct *vma,
		unsigned int flags)
{
	/*
	 * When core dumping an enormous anonymous area that nobody
	 * has touched so far, we don't want to allocate unnecessary pages or
	 * page tables.  Return error instead of NULL to skip handle_mm_fault,
	 * then get_dump_page() will return NULL to leave a hole in the dump.
	 * But we can only make this optimization where a hole would surely
	 * be zero-filled if handle_mm_fault() actually did handle it.
	 */
	if ((flags & FOLL_DUMP) && (!vma->vm_ops || !vma->vm_ops->fault))
		return ERR_PTR(-EFAULT);
	return NULL;
}

static struct page *follow_page_pte(struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd, unsigned int flags)
{
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	spinlock_t *ptl;
	pte_t *ptep, pte;

retry:
	if (unlikely(pmd_bad(*pmd)))
		return no_page_table(vma, flags);

	ptep = pte_offset_map_lock(mm, pmd, address, &ptl);
	pte = *ptep;
	if (!pte_present(pte)) {
		swp_entry_t entry;
		/*
		 * KSM's break_ksm() relies upon recognizing a ksm page
		 * even while it is being migrated, so for that case we
		 * need migration_entry_wait().
		 */
		if (likely(!(flags & FOLL_MIGRATION)))
			goto no_page;
		if (pte_none(pte) || pte_file(pte))
			goto no_page;
		entry = pte_to_swp_entry(pte);
		if (!is_migration_entry(entry))
			goto no_page;
		pte_unmap_unlock(ptep, ptl);
		migration_entry_wait(mm, pmd, address);
		goto retry;
	}
	if ((flags & FOLL_NUMA) && pte_numa(pte))
		goto no_page;
	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
		pte_unmap_unlock(ptep, ptl);
		return NULL;
	}

	page = vm_normal_page(vma, address, pte);
	if (unlikely(!page)) {
		if ((flags & FOLL_DUMP) ||
		    !is_zero_pfn(pte_pfn(pte)))
			goto bad_page;
		page = pte_page(pte);
	}

	if (flags & FOLL_GET)
		get_page_foll(page);
	if (flags & FOLL_TOUCH) {
		if ((flags & FOLL_WRITE) &&
		    !pte_dirty(pte) && !PageDirty(page))
			set_page_dirty(page);
		/*
		 * pte_mkyoung() would be more correct here, but atomic care
		 * is needed to avoid losing the dirty bit: it is easier to use
		 * mark_page_accessed().
		 */
		mark_page_accessed(page);
	}
	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
		/*
		 * The preliminary mapping check is mainly to avoid the
		 * pointless overhead of lock_page on the ZERO_PAGE
		 * which might bounce very badly if there is contention.
		 *
		 * If the page is already locked, we don't need to
		 * handle it now - vmscan will handle it later if and
		 * when it attempts to reclaim the page.
		 */
		if (page->mapping && trylock_page(page)) {
			lru_add_drain();	/* push cached pages to LRU */
			/*
			 * Because we lock page here, and migration is
			 * blocked by the pte's page reference, and we
			 * know the page is still mapped, we don't even
			 * need to check for file-cache page truncation.
			 */
			mlock_vma_page(page);
			unlock_page(page);
		}
	}
	pte_unmap_unlock(ptep, ptl);
	return page;
bad_page:
	pte_unmap_unlock(ptep, ptl);
	return ERR_PTR(-EFAULT);

no_page:
	pte_unmap_unlock(ptep, ptl);
	if (!pte_none(pte))
		return NULL;
	return no_page_table(vma, flags);
}

/**
 * follow_page_mask - look up a page descriptor from a user-virtual address
 * @vma: vm_area_struct mapping @address
 * @address: virtual address to look up
 * @flags: flags modifying lookup behaviour
 * @page_mask: on output, *page_mask is set according to the size of the page
 *
 * @flags can have FOLL_ flags set, defined in <linux/mm.h>
 *
 * Returns the mapped (struct page *), %NULL if no mapping exists, or
 * an error pointer if there is a mapping to something not represented
 * by a page descriptor (see also vm_normal_page()).
 */
struct page *follow_page_mask(struct vm_area_struct *vma,
			      unsigned long address, unsigned int flags,
			      unsigned int *page_mask)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	spinlock_t *ptl;
	struct page *page;
	struct mm_struct *mm = vma->vm_mm;

	*page_mask = 0;

	page = follow_huge_addr(mm, address, flags & FOLL_WRITE);
	if (!IS_ERR(page)) {
		BUG_ON(flags & FOLL_GET);
		return page;
	}

	pgd = pgd_offset(mm, address);
	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return no_page_table(vma, flags);

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return no_page_table(vma, flags);
	if (pud_huge(*pud) && vma->vm_flags & VM_HUGETLB) {
		if (flags & FOLL_GET)
			return NULL;
		page = follow_huge_pud(mm, address, pud, flags & FOLL_WRITE);
		return page;
	}
	if (unlikely(pud_bad(*pud)))
		return no_page_table(vma, flags);

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return no_page_table(vma, flags);
	if (pmd_huge(*pmd) && vma->vm_flags & VM_HUGETLB) {
		page = follow_huge_pmd(mm, address, pmd, flags & FOLL_WRITE);
		if (flags & FOLL_GET) {
			/*
			 * Refcounts on tail pages are not well-defined and
			 * shouldn't be taken. The caller should handle a NULL
			 * return when trying to follow tail pages.
			 */
			if (PageHead(page))
				get_page(page);
			else
				page = NULL;
		}
		return page;
	}
	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
		return no_page_table(vma, flags);
	if (pmd_trans_huge(*pmd)) {
		if (flags & FOLL_SPLIT) {
			split_huge_page_pmd(vma, address, pmd);
			return follow_page_pte(vma, address, pmd, flags);
		}
		ptl = pmd_lock(mm, pmd);
		if (likely(pmd_trans_huge(*pmd))) {
			if (unlikely(pmd_trans_splitting(*pmd))) {
				spin_unlock(ptl);
				wait_split_huge_page(vma->anon_vma, pmd);
			} else {
				page = follow_trans_huge_pmd(vma, address,
							     pmd, flags);
				spin_unlock(ptl);
				*page_mask = HPAGE_PMD_NR - 1;
				return page;
			}
		} else
			spin_unlock(ptl);
	}
	return follow_page_pte(vma, address, pmd, flags);
}

static int get_gate_page(struct mm_struct *mm, unsigned long address,
		unsigned int gup_flags, struct vm_area_struct **vma,
		struct page **page)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int ret = -EFAULT;

	/* user gate pages are read-only */
	if (gup_flags & FOLL_WRITE)
		return -EFAULT;
	if (address > TASK_SIZE)
		pgd = pgd_offset_k(address);
	else
		pgd = pgd_offset_gate(mm, address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -EFAULT;
	VM_BUG_ON(pmd_trans_huge(*pmd));
	pte = pte_offset_map(pmd, address);
	if (pte_none(*pte))
		goto unmap;
	*vma = get_gate_vma(mm);
	if (!page)
		goto out;
	*page = vm_normal_page(*vma, address, *pte);
	if (!*page) {
		if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte)))
			goto unmap;
		*page = pte_page(*pte);
	}
	get_page(*page);
out:
	ret = 0;
unmap:
	pte_unmap(pte);
	return ret;
}

/*
 * mmap_sem must be held on entry.  If @nonblocking != NULL and
 * *@flags does not include FOLL_NOWAIT, the mmap_sem may be released.
 * If it is, *@nonblocking will be set to 0 and -EBUSY returned.
 */
static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
		unsigned long address, unsigned int *flags, int *nonblocking)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int fault_flags = 0;
	int ret;

	/* For mlock, just skip the stack guard page. */
	if ((*flags & FOLL_MLOCK) &&
			(stack_guard_page_start(vma, address) ||
			 stack_guard_page_end(vma, address + PAGE_SIZE)))
		return -ENOENT;
	if (*flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (nonblocking)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (*flags & FOLL_NOWAIT)
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (*flags & FOLL_TRIED) {
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
		if (ret & VM_FAULT_SIGBUS)
			return -EFAULT;
		BUG();
	}

	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}

	if (ret & VM_FAULT_RETRY) {
		if (nonblocking)
			*nonblocking = 0;
		return -EBUSY;
	}

	/*
	 * The VM_FAULT_WRITE bit tells us that do_wp_page has broken COW when
	 * necessary, even if maybe_mkwrite decided not to set pte_write. We
	 * can thus safely do subsequent page lookups as if they were reads.
	 * But only do so when looping for pte_write is futile: in some cases
	 * userspace may also be wanting to write to the gotten user page,
	 * which a read fault here might prevent (a readonly page might get
	 * reCOWed by userspace write).
	 */
	if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
		*flags &= ~FOLL_WRITE;
	return 0;
}

static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
{
	vm_flags_t vm_flags = vma->vm_flags;

	if (vm_flags & (VM_IO | VM_PFNMAP))
		return -EFAULT;

	if (gup_flags & FOLL_WRITE) {
		if (!(vm_flags & VM_WRITE)) {
			if (!(gup_flags & FOLL_FORCE))
				return -EFAULT;
			/*
			 * We used to let the write,force case do COW in a
			 * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
			 * set a breakpoint in a read-only mapping of an
			 * executable, without corrupting the file (yet only
			 * when that file had been opened for writing!).
			 * Anon pages in shared mappings are surprising: now
			 * just reject it.
			 */
			if (!is_cow_mapping(vm_flags)) {
				WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
				return -EFAULT;
			}
		}
	} else if (!(vm_flags & VM_READ)) {
		if (!(gup_flags & FOLL_FORCE))
			return -EFAULT;
		/*
		 * Is there actually any vma we can reach here which does not
		 * have VM_MAYREAD set?
		 */
		if (!(vm_flags & VM_MAYREAD))
			return -EFAULT;
	}
	return 0;
}

/**
 * __get_user_pages() - pin user pages in memory
 * @tsk:	task_struct of target task
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @gup_flags:	flags modifying pin behaviour
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 * @nonblocking: whether waiting for disk IO or mmap_sem contention
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held.  It may be released.  See below.
 *
 * __get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * __get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
 * appropriate) must be called after the page is finished with, and
 * before put_page is called.
 *
 * If @nonblocking != NULL, __get_user_pages will not wait for disk IO
 * or mmap_sem contention, and if waiting is needed to pin all pages,
 * *@nonblocking will be set to 0.  Further, if @gup_flags does not
 * include FOLL_NOWAIT, the mmap_sem will be released via up_read() in
 * this case.
 *
 * A caller using such a combination of @nonblocking and @gup_flags
 * must therefore hold the mmap_sem for reading only, and recognize
 * when it's been released.  Otherwise, it must be held for either
 * reading or writing and will not be released.
 *
 * In most cases, get_user_pages or get_user_pages_fast should be used
 * instead of __get_user_pages. __get_user_pages should be used only if
 * you need some special @gup_flags.
 */
long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages,
		unsigned int gup_flags, struct page **pages,
		struct vm_area_struct **vmas, int *nonblocking)
{
	long i = 0;
	unsigned int page_mask;
	struct vm_area_struct *vma = NULL;

	if (!nr_pages)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * If FOLL_FORCE is set then do not force a full fault as the hinting
	 * fault information is unrelated to the reference behaviour of a task
	 * using the address space
	 */
	if (!(gup_flags & FOLL_FORCE))
		gup_flags |= FOLL_NUMA;

	do {
		struct page *page;
		unsigned int foll_flags = gup_flags;
		unsigned int page_increm;

		/* first iteration or cross vma bound */
		if (!vma || start >= vma->vm_end) {
			vma = find_extend_vma(mm, start);
			if (!vma && in_gate_area(mm, start)) {
				int ret;
				ret = get_gate_page(mm, start & PAGE_MASK,
						gup_flags, &vma,
						pages ? &pages[i] : NULL);
				if (ret)
					return i ? : ret;
				page_mask = 0;
				goto next_page;
			}

			if (!vma || check_vma_flags(vma, gup_flags))
				return i ? : -EFAULT;
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
						gup_flags);
				continue;
			}
		}
retry:
		/*
		 * If we have a pending SIGKILL, don't keep faulting pages and
		 * potentially allocating memory.
		 */
		if (unlikely(fatal_signal_pending(current)))
			return i ? i : -ERESTARTSYS;
		cond_resched();
		page = follow_page_mask(vma, start, foll_flags, &page_mask);
		if (!page) {
			int ret;
			ret = faultin_page(tsk, vma, start, &foll_flags,
					nonblocking);
			switch (ret) {
			case 0:
				goto retry;
			case -EFAULT:
			case -ENOMEM:
			case -EHWPOISON:
				return i ? i : ret;
			case -EBUSY:
				return i;
			case -ENOENT:
				goto next_page;
			}
			BUG();
		}
		if (IS_ERR(page))
			return i ? i : PTR_ERR(page);
		if (pages) {
			pages[i] = page;
			flush_anon_page(vma, page, start);
			flush_dcache_page(page);
			page_mask = 0;
		}
next_page:
		if (vmas) {
			vmas[i] = vma;
			page_mask = 0;
		}
		page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
		if (page_increm > nr_pages)
			page_increm = nr_pages;
		i += page_increm;
		start += page_increm * PAGE_SIZE;
		nr_pages -= page_increm;
	} while (nr_pages);
	return i;
}
EXPORT_SYMBOL(__get_user_pages);
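
/*
 * Illustrative sketch, not part of this file: one plausible way a caller can
 * drive the @nonblocking protocol documented above, faulting a range in
 * without collecting page pointers.  The helper name and its surroundings are
 * hypothetical; __mm_populate() in mm/mlock.c is a real in-tree user of this
 * pattern.  Note that @gup_flags must not include FOLL_GET here, since no
 * pages array is passed.
 *
 *	static long fault_in_range(struct mm_struct *mm, unsigned long start,
 *				   unsigned long nr_pages,
 *				   unsigned int gup_flags)
 *	{
 *		int nonblocking = 1;
 *		long ret;
 *
 *		down_read(&mm->mmap_sem);
 *		ret = __get_user_pages(current, mm, start, nr_pages,
 *				       gup_flags, NULL, NULL, &nonblocking);
 *		if (nonblocking)
 *			up_read(&mm->mmap_sem);
 *		return ret;
 *	}
 *
 * If nonblocking came back 0, __get_user_pages() has already released
 * mmap_sem on our behalf, so it must not be unlocked again here.
 */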

/*
 * fixup_user_fault() - manually resolve a user page fault
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @address:	user address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * This is meant to be called in the specific scenario where for locking reasons
 * we try to access user memory in atomic context (within a pagefault_disable()
 * section), this returns -EFAULT, and we want to resolve the user fault before
 * trying again.
 *
 * Typically this is meant to be used by the futex code.
 *
 * The main difference with get_user_pages() is that this function will
 * unconditionally call handle_mm_fault() which will in turn perform all the
 * necessary SW fixup of the dirty and young bits in the PTE, while
 * get_user_pages() only guarantees to update these in the struct page.
 *
 * This is important for some architectures where those bits also gate the
 * access permission to the page because they are maintained in software.  On
 * such architectures, gup() will not be enough to make a subsequent access
 * succeed.
 *
 * This has the same semantics wrt the @mm->mmap_sem as does filemap_fault().
 */
int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long address, unsigned int fault_flags)
{
	struct vm_area_struct *vma;
	vm_flags_t vm_flags;
	int ret;

	vma = find_extend_vma(mm, address);
	if (!vma || address < vma->vm_start)
		return -EFAULT;

	vm_flags = (fault_flags & FAULT_FLAG_WRITE) ? VM_WRITE : VM_READ;
	if (!(vm_flags & vma->vm_flags))
		return -EFAULT;

	ret = handle_mm_fault(mm, vma, address, fault_flags);
	if (ret & VM_FAULT_ERROR) {
		if (ret & VM_FAULT_OOM)
			return -ENOMEM;
		if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
			return -EHWPOISON;
		if (ret & VM_FAULT_SIGBUS)
			return -EFAULT;
		BUG();
	}
	if (tsk) {
		if (ret & VM_FAULT_MAJOR)
			tsk->maj_flt++;
		else
			tsk->min_flt++;
	}
	return 0;
}
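
/*
 * Illustrative sketch, not part of this file: the futex-style caller this
 * helper is designed for, modelled on fault_in_user_writeable() in
 * kernel/futex.c.  An atomic user access fails while page faults are
 * disabled, the fault is resolved here under mmap_sem, and the caller then
 * retries the atomic access.
 *
 *	static int fault_in_user_writeable(u32 __user *uaddr)
 *	{
 *		struct mm_struct *mm = current->mm;
 *		int ret;
 *
 *		down_read(&mm->mmap_sem);
 *		ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
 *				       FAULT_FLAG_WRITE);
 *		up_read(&mm->mmap_sem);
 *
 *		return ret < 0 ? ret : 0;
 *	}
 */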

/*
 * get_user_pages() - pin user pages in memory
 * @tsk:	the task_struct to use for page fault accounting, or
 *		NULL if faults are not to be recorded.
 * @mm:		mm_struct of target mm
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to by the caller
 * @force:	whether to force access even when user mapping is currently
 *		protected (but never forces write access to shared mapping).
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long. Or NULL, if caller
 *		only intends to ensure the pages are faulted in.
 * @vmas:	array of pointers to vmas corresponding to each page.
 *		Or NULL if the caller does not require them.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with. vmas will only
 * remain valid while mmap_sem is held.
 *
 * Must be called with mmap_sem held for read or write.
 *
 * get_user_pages walks a process's page tables and takes a reference to
 * each struct page that each user address corresponds to at a given
 * instant. That is, it takes the page that would be accessed if a user
 * thread accesses the given user virtual address at that instant.
 *
 * This does not guarantee that the page exists in the user mappings when
 * get_user_pages returns, and there may even be a completely different
 * page there in some cases (eg. if mmapped pagecache has been invalidated
 * and subsequently re-faulted). However it does guarantee that the page
 * won't be freed completely. And mostly callers simply care that the page
 * contains data that was valid *at some point in time*. Typically, an IO
 * or similar operation cannot guarantee anything stronger anyway because
 * locks can't be held over the syscall boundary.
 *
 * If write=0, the page must not be written to. If the page is written to,
 * set_page_dirty (or set_page_dirty_lock, as appropriate) must be called
 * after the page is finished with, and before put_page is called.
 *
 * get_user_pages is typically used for fewer-copy IO operations, to get a
 * handle on the memory by some means other than accesses via the user virtual
 * addresses. The pages may be submitted for DMA to devices or accessed via
 * their kernel linear mapping (via the kmap APIs). Care should be taken to
 * use the correct cache flushing APIs.
 *
 * See also get_user_pages_fast, for performance critical applications.
 */
long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		unsigned long start, unsigned long nr_pages, int write,
		int force, struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;
	return __get_user_pages(tsk, mm, start, nr_pages, flags, pages, vmas,
				NULL);
}
EXPORT_SYMBOL(get_user_pages);
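
/*
 * Illustrative sketch, not part of this file: a typical caller pins a user
 * buffer, accesses the pages through the kernel mapping, marks them dirty if
 * it wrote to them, and drops every reference.  The surrounding function and
 * the NR_PIN constant are hypothetical; write = 1, force = 0 below.
 *
 *	struct page *pages[NR_PIN];
 *	long i, got;
 *
 *	down_read(&current->mm->mmap_sem);
 *	got = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
 *			     NR_PIN, 1, 0, pages, NULL);
 *	up_read(&current->mm->mmap_sem);
 *	if (got <= 0)
 *		return got ? got : -EFAULT;
 *
 *	for (i = 0; i < got; i++) {
 *		void *kaddr = kmap(pages[i]);
 *
 *		(use or fill the data at kaddr here)
 *		kunmap(pages[i]);
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */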

/**
 * get_dump_page() - pin user page in memory while writing it to core dump
 * @addr: user address
 *
 * Returns struct page pointer of user page pinned for dump,
 * to be freed afterwards by page_cache_release() or put_page().
 *
 * Returns NULL on any kind of failure - a hole must then be inserted into
 * the corefile, to preserve alignment with its headers; and also returns
 * NULL wherever the ZERO_PAGE, or an anonymous pte_none, has been found -
 * allowing a hole to be left in the corefile to save diskspace.
 *
 * Called without mmap_sem, but after all other threads have been killed.
 */
#ifdef CONFIG_ELF_CORE
struct page *get_dump_page(unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	if (__get_user_pages(current, current->mm, addr, 1,
			     FOLL_FORCE | FOLL_DUMP | FOLL_GET, &page, &vma,
			     NULL) < 1)
		return NULL;
	flush_cache_page(vma, addr, page_to_pfn(page));
	return page;
}
#endif /* CONFIG_ELF_CORE */

/*
 * Generic RCU Fast GUP
 *
 * get_user_pages_fast attempts to pin user pages by walking the page
 * tables directly and avoids taking locks. Thus the walker needs to be
 * protected from page table pages being freed from under it, and should
 * block any THP splits.
 *
 * One way to achieve this is to have the walker disable interrupts, and
 * rely on IPIs from the TLB flushing code blocking before the page table
 * pages are freed. This is unsuitable for architectures that do not need
 * to broadcast an IPI when invalidating TLBs.
 *
 * Another way to achieve this is to batch up page table containing pages
 * belonging to more than one mm_user, then rcu_sched a callback to free those
 * pages. Disabling interrupts will allow the fast_gup walker to both block
 * the rcu_sched callback, and an IPI that we broadcast for splitting THPs
 * (which is a relatively rare event). The code below adopts this strategy.
 *
 * Before activating this code, please be aware that the following assumptions
 * are currently made:
 *
 *  *) HAVE_RCU_TABLE_FREE is enabled, and tlb_remove_table is used to free
 *     pages containing page tables.
 *
 *  *) THP splits will broadcast an IPI, this can be achieved by overriding
 *     pmdp_splitting_flush.
 *
 *  *) ptes can be read atomically by the architecture.
 *
 *  *) access_ok is sufficient to validate userspace address ranges.
 *
 * The last two assumptions can be relaxed by the addition of helper functions.
 *
 * This code is based heavily on the PowerPC implementation by Nick Piggin.
 */
#ifdef CONFIG_HAVE_GENERIC_RCU_GUP

#ifdef __HAVE_ARCH_PTE_SPECIAL
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	pte_t *ptep, *ptem;
	int ret = 0;

	ptem = ptep = pte_offset_map(&pmd, addr);
	do {
		/*
		 * In the line below we are assuming that the pte can be read
		 * atomically. If this is not the case for your architecture,
		 * please wrap this in a helper function!
		 *
		 * for an example see gup_get_pte in arch/x86/mm/gup.c
		 */
		pte_t pte = ACCESS_ONCE(*ptep);
		struct page *page;

		/*
		 * Similar to the PMD case below, NUMA hinting must take slow
		 * path
		 */
		if (!pte_present(pte) || pte_special(pte) ||
			pte_numa(pte) || (write && !pte_write(pte)))
			goto pte_unmap;

		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);

		if (!page_cache_get_speculative(page))
			goto pte_unmap;

		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			goto pte_unmap;
		}

		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	ret = 1;

pte_unmap:
	pte_unmap(ptem);
	return ret;
}
#else

/*
 * If we can't determine whether or not a pte is special, then fail immediately
 * for ptes. Note, we can still pin HugeTLB and THP as these are guaranteed not
 * to be special.
 *
 * For a futex to be placed on a THP tail page, get_futex_key requires a
 * __get_user_pages_fast implementation that can pin pages. Thus it's still
 * useful to have gup_huge_pmd even if we can't operate on ptes.
 */
static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	return 0;
}
#endif /* __HAVE_ARCH_PTE_SPECIAL */

static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pmd_write(orig))
		return 0;

	refs = 0;
	head = pmd_page(orig);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(orig) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return. (This allows the THP code to bump their ref count when
	 * they are split into base pages).
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page, *tail;
	int refs;

	if (write && !pud_write(orig))
		return 0;

	refs = 0;
	head = pud_page(orig);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(orig) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
			unsigned long end, int write,
			struct page **pages, int *nr)
{
	int refs;
	struct page *head, *page, *tail;

	if (write && !pgd_write(orig))
		return 0;

	refs = 0;
	head = pgd_page(orig);
	page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
		int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp;

	pmdp = pmd_offset(&pud, addr);
	do {
		pmd_t pmd = ACCESS_ONCE(*pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd) || pmd_trans_splitting(pmd))
			return 0;

		if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_numa(pmd))
				return 0;

			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,
					pages, nr))
				return 0;

		} else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
			/*
			 * Architectures may use a different format for a
			 * hugetlbfs pmd than for a THP pmd.
			 */
			if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
					 PMD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
			 int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp;

	pudp = pud_offset(&pgd, addr);
	do {
		pud_t pud = READ_ONCE(*pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_huge(pud))) {
			if (!gup_huge_pud(pud, pudp, addr, next, write,
					  pages, nr))
				return 0;
		} else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
			if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
					 PUD_SHIFT, next, write, pages, nr))
				return 0;
		} else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.  It will only return non-negative values.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;

	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
					start, len)))
		return 0;

	/*
	 * Disable interrupts.  We use the nested form as we can already have
	 * interrupts disabled by get_futex_key.
	 *
	 * With interrupts disabled, we block page table pages from being
	 * freed from under us. See mmu_gather_tlb in asm-generic/tlb.h
	 * for more details.
	 *
	 * We do not adopt an rcu_read_lock(.) here as we also want to
	 * block IPIs that come from THPs splitting.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd_t pgd = ACCESS_ONCE(*pgdp);

		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (unlikely(pgd_huge(pgd))) {
			if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
					  pages, &nr))
				break;
		} else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
			if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
					 PGDIR_SHIFT, next, write, pages, &nr))
				break;
		} else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	int nr, ret;

	start &= PAGE_MASK;
	nr = __get_user_pages_fast(start, nr_pages, write, pages);
	ret = nr;

	if (nr < nr_pages) {
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
				     nr_pages - nr, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
	}

	return ret;
}
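
/*
 * Illustrative sketch, not part of this file: unlike get_user_pages(), the
 * caller does not take mmap_sem itself; the fallback above handles the slow
 * path.  The variable names are hypothetical; write = 1 below.
 *
 *	int i, got;
 *
 *	got = get_user_pages_fast(start, nr, 1, pages);
 *	if (got < 0)
 *		return got;
 *
 *	(use pages[0..got-1], e.g. to build a bio or scatterlist for DMA)
 *
 *	for (i = 0; i < got; i++) {
 *		set_page_dirty_lock(pages[i]);
 *		put_page(pages[i]);
 *	}
 */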

#endif /* CONFIG_HAVE_GENERIC_RCU_GUP */