/*
 * fs/userfaultfd.c
 *
 * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Copyright (C) 2015 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Some part derived from fs/eventfd.c (anon inode setup) and
 * mm/ksm.c (mm hashing).
 */

#include <linux/list.h>
#include <linux/hashtable.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/file.h>
#include <linux/bug.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/userfaultfd_k.h>
#include <linux/mempolicy.h>
#include <linux/ioctl.h>
#include <linux/security.h>
#include <linux/hugetlb.h>

static struct kmem_cache *userfaultfd_ctx_cachep __read_mostly;

enum userfaultfd_state {
	UFFD_STATE_WAIT_API,
	UFFD_STATE_RUNNING,
};

/*
 * Start with fault_pending_wqh and fault_wqh so they're more likely
 * to be in the same cacheline.
 */
struct userfaultfd_ctx {
	/* waitqueue head for the pending (i.e. not read) userfaults */
	wait_queue_head_t fault_pending_wqh;
	/* waitqueue head for the userfaults */
	wait_queue_head_t fault_wqh;
	/* waitqueue head for the pseudo fd to wakeup poll/read */
	wait_queue_head_t fd_wqh;
	/* waitqueue head for events */
	wait_queue_head_t event_wqh;
	/* a refile sequence protected by fault_pending_wqh lock */
	struct seqcount refile_seq;
	/* pseudo fd refcounting */
	atomic_t refcount;
	/* userfaultfd syscall flags */
	unsigned int flags;
	/* features requested from the userspace */
	unsigned int features;
	/* state machine */
	enum userfaultfd_state state;
	/* released */
	bool released;
	/* memory mappings are changing because of a non-cooperative event */
	bool mmap_changing;
	/* mm with one or more vmas attached to this userfaultfd_ctx */
	struct mm_struct *mm;
};
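
/*
 * Illustrative userspace sketch (not kernel code): a monitor obtains
 * one of these contexts indirectly by creating the pseudo fd with the
 * userfaultfd() syscall and completing the UFFD_API handshake, which
 * moves the state machine above from UFFD_STATE_WAIT_API to
 * UFFD_STATE_RUNNING before any other ioctl is allowed. Constants are
 * from <linux/userfaultfd.h>; error handling is omitted for brevity.
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	struct uffdio_api api = { .api = UFFD_API, .features = 0 };
 *	if (ioctl(uffd, UFFDIO_API, &api) == -1)
 *		abort();	// handshake failed, fd stays unusable
 */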

struct userfaultfd_fork_ctx {
	struct userfaultfd_ctx *orig;
	struct userfaultfd_ctx *new;
	struct list_head list;
};

struct userfaultfd_unmap_ctx {
	struct userfaultfd_ctx *ctx;
	unsigned long start;
	unsigned long end;
	struct list_head list;
};

struct userfaultfd_wait_queue {
	struct uffd_msg msg;
	wait_queue_entry_t wq;
	struct userfaultfd_ctx *ctx;
	bool waken;
};

struct userfaultfd_wake_range {
	unsigned long start;
	unsigned long len;
};

static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
				     int wake_flags, void *key)
{
	struct userfaultfd_wake_range *range = key;
	int ret;
	struct userfaultfd_wait_queue *uwq;
	unsigned long start, len;

	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
	ret = 0;
	/* len == 0 means wake all */
	start = range->start;
	len = range->len;
	if (len && (start > uwq->msg.arg.pagefault.address ||
		    start + len <= uwq->msg.arg.pagefault.address))
		goto out;
	WRITE_ONCE(uwq->waken, true);
	/*
	 * The Program-Order guarantees provided by the scheduler
	 * ensure uwq->waken is visible before the task is woken.
	 */
	ret = wake_up_state(wq->private, mode);
	if (ret) {
		/*
		 * Wake only once, autoremove behavior.
		 *
		 * After the effect of list_del_init is visible to the other
		 * CPUs, the waitqueue may disappear from under us, see the
		 * !list_empty_careful() in handle_userfault().
		 *
		 * try_to_wake_up() has an implicit smp_mb(), and the
		 * wq->private is read before calling the extern function
		 * "wake_up_state" (which in turn calls try_to_wake_up).
		 */
		list_del_init(&wq->entry);
	}
out:
	return ret;
}

/**
 * userfaultfd_ctx_get - Acquires a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to the userfaultfd context.
 */
static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
	if (!atomic_inc_not_zero(&ctx->refcount))
		BUG();
}

/**
 * userfaultfd_ctx_put - Releases a reference to the internal userfaultfd
 * context.
 * @ctx: [in] Pointer to userfaultfd context.
 *
 * The userfaultfd context reference must have been previously acquired either
 * with userfaultfd_ctx_get() or userfaultfd_ctx_fdget().
 */
static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
		VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock));
		VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
		mmdrop(ctx->mm);
		kmem_cache_free(userfaultfd_ctx_cachep, ctx);
	}
}

static inline void msg_init(struct uffd_msg *msg)
{
	BUILD_BUG_ON(sizeof(struct uffd_msg) != 32);
	/*
	 * Must use memset to zero out the paddings or kernel data is
	 * leaked to userland.
	 */
	memset(msg, 0, sizeof(struct uffd_msg));
}

static inline struct uffd_msg userfault_msg(unsigned long address,
					    unsigned int flags,
					    unsigned long reason,
					    unsigned int features)
{
	struct uffd_msg msg;

	msg_init(&msg);
	msg.event = UFFD_EVENT_PAGEFAULT;
	msg.arg.pagefault.address = address;
	if (flags & FAULT_FLAG_WRITE)
		/*
		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WRITE
		 * was not set in a UFFD_EVENT_PAGEFAULT, it means it
		 * was a read fault, otherwise if set it means it's
		 * a write fault.
		 */
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WRITE;
	if (reason & VM_UFFD_WP)
		/*
		 * If UFFD_FEATURE_PAGEFAULT_FLAG_WP was set in the
		 * uffdio_api.features and UFFD_PAGEFAULT_FLAG_WP was
		 * not set in a UFFD_EVENT_PAGEFAULT, it means it was
		 * a missing fault, otherwise if set it means it's a
		 * write protect fault.
		 */
		msg.arg.pagefault.flags |= UFFD_PAGEFAULT_FLAG_WP;
	if (features & UFFD_FEATURE_THREAD_ID)
		msg.arg.pagefault.feat.ptid = task_pid_vnr(current);
	return msg;
}
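
/*
 * Illustrative userspace sketch (not kernel code): how a monitor might
 * decode the message built above after read()ing it from the uffd.
 * Names come from <linux/userfaultfd.h>; error handling omitted.
 *
 *	struct uffd_msg msg;
 *	read(uffd, &msg, sizeof(msg));
 *	if (msg.event == UFFD_EVENT_PAGEFAULT) {
 *		unsigned long long addr = msg.arg.pagefault.address;
 *		int is_write = msg.arg.pagefault.flags &
 *			       UFFD_PAGEFAULT_FLAG_WRITE;
 *		// resolve the fault, e.g. with UFFDIO_COPY (see below)
 *	}
 */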

#ifdef CONFIG_HUGETLB_PAGE
/*
 * Same functionality as userfaultfd_must_wait below with modifications for
 * hugepmd ranges.
 */
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *vma,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pte_t *ptep, pte;
	bool ret = true;

	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
	if (!ptep)
		goto out;

	ret = false;
	pte = huge_ptep_get(ptep);

	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (huge_pte_none(pte))
		ret = true;
	if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
		ret = true;
out:
	return ret;
}
#else
static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
					      struct vm_area_struct *vma,
					      unsigned long address,
					      unsigned long flags,
					      unsigned long reason)
{
	return false;	/* should never get here */
}
#endif /* CONFIG_HUGETLB_PAGE */

/*
 * Verify the pagetables are still not ok after having registered into
 * the fault_pending_wqh to avoid userland having to UFFDIO_WAKE any
 * userfault that has already been resolved, if userfaultfd_read and
 * UFFDIO_COPY|ZEROPAGE are being run simultaneously on two different
 * threads.
 */
static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx,
					 unsigned long address,
					 unsigned long flags,
					 unsigned long reason)
{
	struct mm_struct *mm = ctx->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd, _pmd;
	pte_t *pte;
	bool ret = true;

	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto out;
	p4d = p4d_offset(pgd, address);
	if (!p4d_present(*p4d))
		goto out;
	pud = pud_offset(p4d, address);
	if (!pud_present(*pud))
		goto out;
	pmd = pmd_offset(pud, address);
	/*
	 * READ_ONCE must function as a barrier with narrower scope
	 * and it must be equivalent to:
	 *	_pmd = *pmd; barrier();
	 *
	 * This is to deal with the instability (as in
	 * pmd_trans_unstable) of the pmd.
	 */
	_pmd = READ_ONCE(*pmd);
	if (pmd_none(_pmd))
		goto out;

	ret = false;
	if (!pmd_present(_pmd))
		goto out;

	if (pmd_trans_huge(_pmd))
		goto out;

	/*
	 * the pmd is stable (as in !pmd_trans_unstable) so we can re-read it
	 * and use the standard pte_offset_map() instead of parsing _pmd.
	 */
	pte = pte_offset_map(pmd, address);
	/*
	 * Lockless access: we're in a wait_event so it's ok if it
	 * changes under us.
	 */
	if (pte_none(*pte))
		ret = true;
	pte_unmap(pte);

out:
	return ret;
}

/*
 * The locking rules involved in returning VM_FAULT_RETRY depending on
 * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and
 * FAULT_FLAG_KILLABLE are not straightforward. The "Caution"
 * recommendation in __lock_page_or_retry is not an understatement.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set, the mmap_sem must be released
 * before returning VM_FAULT_RETRY only if FAULT_FLAG_RETRY_NOWAIT is
 * not set.
 *
 * If FAULT_FLAG_ALLOW_RETRY is set but FAULT_FLAG_KILLABLE is not
 * set, VM_FAULT_RETRY can still be returned if and only if there are
 * fatal_signal_pending()s, and the mmap_sem must be released before
 * returning it.
 */
int handle_userfault(struct vm_fault *vmf, unsigned long reason)
{
	struct mm_struct *mm = vmf->vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue uwq;
	int ret;
	bool must_wait, return_to_userland;
	long blocking_state;

	ret = VM_FAULT_SIGBUS;

	/*
	 * We don't do userfault handling for the final child pid update.
	 *
	 * We also don't do userfault handling during
	 * coredumping. hugetlbfs has the special
	 * follow_hugetlb_page() to skip missing pages in the
	 * FOLL_DUMP case, anon memory also checks for FOLL_DUMP with
	 * the no_page_table() helper in follow_page_mask(), but the
	 * shmem_vm_ops->fault method is invoked even during
	 * coredumping without mmap_sem and it ends up here.
	 */
	if (current->flags & (PF_EXITING|PF_DUMPCORE))
		goto out;

	/*
	 * Coredumping runs without mmap_sem so we can only check that
	 * the mmap_sem is held, if PF_DUMPCORE was not set.
	 */
	WARN_ON_ONCE(!rwsem_is_locked(&mm->mmap_sem));

	ctx = vmf->vma->vm_userfaultfd_ctx.ctx;
	if (!ctx)
		goto out;

	BUG_ON(ctx->mm != mm);

	VM_BUG_ON(reason & ~(VM_UFFD_MISSING|VM_UFFD_WP));
	VM_BUG_ON(!(reason & VM_UFFD_MISSING) ^ !!(reason & VM_UFFD_WP));

	if (ctx->features & UFFD_FEATURE_SIGBUS)
		goto out;

	/*
	 * If it's already released don't get it. This avoids looping
	 * in __get_user_pages if userfaultfd_release waits on the
	 * caller of handle_userfault to release the mmap_sem.
	 */
	if (unlikely(READ_ONCE(ctx->released))) {
		/*
		 * Don't return VM_FAULT_SIGBUS in this case, so a non
		 * cooperative manager can close the uffd after the
		 * last UFFDIO_COPY, without risking triggering an
		 * involuntary SIGBUS if the process was starting the
		 * userfaultfd while the userfaultfd was still armed
		 * (but after the last UFFDIO_COPY). If the uffd
		 * wasn't already closed when the userfault reached
		 * this point, that would normally be solved by
		 * userfaultfd_must_wait returning 'false'.
		 *
		 * If we were to return VM_FAULT_SIGBUS here, the non
		 * cooperative manager would be instead forced to
		 * always call UFFDIO_UNREGISTER before it can safely
		 * close the uffd.
		 */
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	/*
	 * Check that we can return VM_FAULT_RETRY.
	 *
	 * NOTE: it should become possible to return VM_FAULT_RETRY
	 * even if FAULT_FLAG_TRIED is set without leading to gup()
	 * -EBUSY failures, if the userfaultfd is to be extended for
	 * VM_UFFD_WP tracking and we intend to arm the userfault
	 * without first stopping userland access to the memory. For
	 * VM_UFFD_MISSING userfaults this is enough for now.
	 */
	if (unlikely(!(vmf->flags & FAULT_FLAG_ALLOW_RETRY))) {
		/*
		 * Validate the invariant that nowait must allow retry
		 * to be sure not to return SIGBUS erroneously on
		 * nowait invocations.
		 */
		BUG_ON(vmf->flags & FAULT_FLAG_RETRY_NOWAIT);
#ifdef CONFIG_DEBUG_VM
		if (printk_ratelimit()) {
			printk(KERN_WARNING
			       "FAULT_FLAG_ALLOW_RETRY missing %x\n",
			       vmf->flags);
			dump_stack();
		}
#endif
		goto out;
	}

	/*
	 * Handle nowait, not much to do other than tell it to retry
	 * and wait.
	 */
	ret = VM_FAULT_RETRY;
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
		goto out;

	/* take the reference before dropping the mmap_sem */
	userfaultfd_ctx_get(ctx);

	init_waitqueue_func_entry(&uwq.wq, userfaultfd_wake_function);
	uwq.wq.private = current;
	uwq.msg = userfault_msg(vmf->address, vmf->flags, reason,
				ctx->features);
	uwq.ctx = ctx;
	uwq.waken = false;

	return_to_userland =
		(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
		(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
	blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
			 TASK_KILLABLE;

	spin_lock(&ctx->fault_pending_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq);
	/*
	 * The smp_mb() after __set_current_state prevents the reads
	 * following the spin_unlock to happen before the list_add in
	 * __add_wait_queue.
	 */
	set_current_state(blocking_state);
	spin_unlock(&ctx->fault_pending_wqh.lock);

	if (!is_vm_hugetlb_page(vmf->vma))
		must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
						  reason);
	else
		must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma,
						       vmf->address,
						       vmf->flags, reason);
	up_read(&mm->mmap_sem);

	if (likely(must_wait && !READ_ONCE(ctx->released) &&
		   (return_to_userland ? !signal_pending(current) :
		    !fatal_signal_pending(current)))) {
		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();
		ret |= VM_FAULT_MAJOR;

		/*
		 * False wakeups can originate even from rwsem before
		 * up_read() however userfaults will wait either for a
		 * targeted wakeup on the specific uwq waitqueue from
		 * wake_userfault() or for signals or for uffd
		 * release.
		 */
		while (!READ_ONCE(uwq.waken)) {
			/*
			 * This needs the full smp_store_mb()
			 * guarantee as the state write must be
			 * visible to other CPUs before reading
			 * uwq.waken from other CPUs.
			 */
			set_current_state(blocking_state);
			if (READ_ONCE(uwq.waken) ||
			    READ_ONCE(ctx->released) ||
			    (return_to_userland ? signal_pending(current) :
			     fatal_signal_pending(current)))
				break;
			schedule();
		}
	}

	__set_current_state(TASK_RUNNING);

	if (return_to_userland) {
		if (signal_pending(current) &&
		    !fatal_signal_pending(current)) {
			/*
			 * If we got a SIGSTOP or SIGCONT and this is
			 * a normal userland page fault, just let
			 * userland return so the signal will be
			 * handled and gdb debugging works. The page
			 * fault code immediately after we return from
			 * this function is going to release the
			 * mmap_sem and it's not depending on it
			 * (unlike gup would if we were not to return
			 * VM_FAULT_RETRY).
			 *
			 * If a fatal signal is pending we still take
			 * the streamlined VM_FAULT_RETRY failure path
			 * and there's no need to retake the mmap_sem
			 * in such case.
			 */
			down_read(&mm->mmap_sem);
			ret = VM_FAULT_NOPAGE;
		}
	}

	/*
	 * Here we race with the list_del; list_add in
	 * userfaultfd_ctx_read(), however because we don't ever run
	 * list_del_init() to refile across the two lists, the prev
	 * and next pointers will never point to self. list_add also
	 * would never let either of the two pointers point to
	 * self. So list_empty_careful won't risk seeing both pointers
	 * pointing to self at any time during the list refile. The
	 * only case where list_del_init() is called is the full
	 * removal in the wake function and there we don't re-list_add
	 * and it's fine not to block on the spinlock. The uwq on this
	 * kernel stack can be released after the list_del_init.
	 */
	if (!list_empty_careful(&uwq.wq.entry)) {
		spin_lock(&ctx->fault_pending_wqh.lock);
		/*
		 * No need of list_del_init(), the uwq on the stack
		 * will be freed shortly anyway.
		 */
		list_del(&uwq.wq.entry);
		spin_unlock(&ctx->fault_pending_wqh.lock);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
	userfaultfd_ctx_put(ctx);

out:
	return ret;
}
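
/*
 * Illustrative userspace sketch (not kernel code): the counterpart of
 * handle_userfault() above is a monitor thread that polls the uffd,
 * reads the UFFD_EVENT_PAGEFAULT message and resolves it, e.g. with
 * UFFDIO_COPY, which also wakes the blocked faulting thread. A minimal
 * loop, error handling omitted:
 *
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *	for (;;) {
 *		poll(&pfd, 1, -1);
 *		struct uffd_msg msg;
 *		if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
 *			continue;
 *		if (msg.event != UFFD_EVENT_PAGEFAULT)
 *			continue;
 *		// ... fill the faulting page with UFFDIO_COPY (see below)
 *	}
 */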

static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx,
					      struct userfaultfd_wait_queue *ewq)
{
	struct userfaultfd_ctx *release_new_ctx;

	if (WARN_ON_ONCE(current->flags & PF_EXITING))
		goto out;

	ewq->ctx = ctx;
	init_waitqueue_entry(&ewq->wq, current);
	release_new_ctx = NULL;

	spin_lock(&ctx->event_wqh.lock);
	/*
	 * After the __add_wait_queue the uwq is visible to userland
	 * through poll/read().
	 */
	__add_wait_queue(&ctx->event_wqh, &ewq->wq);
	for (;;) {
		set_current_state(TASK_KILLABLE);
		if (ewq->msg.event == 0)
			break;
		if (READ_ONCE(ctx->released) ||
		    fatal_signal_pending(current)) {
			/*
			 * &ewq->wq may be queued in fork_event, but
			 * __remove_wait_queue ignores the head
			 * parameter. It would be a problem if it
			 * didn't.
			 */
			__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
			if (ewq->msg.event == UFFD_EVENT_FORK) {
				struct userfaultfd_ctx *new;

				new = (struct userfaultfd_ctx *)
					(unsigned long)
					ewq->msg.arg.reserved.reserved1;
				release_new_ctx = new;
			}
			break;
		}

		spin_unlock(&ctx->event_wqh.lock);

		wake_up_poll(&ctx->fd_wqh, EPOLLIN);
		schedule();

		spin_lock(&ctx->event_wqh.lock);
	}
	__set_current_state(TASK_RUNNING);
	spin_unlock(&ctx->event_wqh.lock);

	if (release_new_ctx) {
		struct vm_area_struct *vma;
		struct mm_struct *mm = release_new_ctx->mm;

		/* the various vma->vm_userfaultfd_ctx still points to it */
		down_write(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			if (vma->vm_userfaultfd_ctx.ctx == release_new_ctx)
				vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		up_write(&mm->mmap_sem);

		userfaultfd_ctx_put(release_new_ctx);
	}

	/*
	 * ctx may go away after this if the userfault pseudo fd is
	 * already released.
	 */
out:
	WRITE_ONCE(ctx->mmap_changing, false);
	userfaultfd_ctx_put(ctx);
}

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
				       struct userfaultfd_wait_queue *ewq)
{
	ewq->msg.event = 0;
	wake_up_locked(&ctx->event_wqh);
	__remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}

int dup_userfaultfd(struct vm_area_struct *vma, struct list_head *fcs)
{
	struct userfaultfd_ctx *ctx = NULL, *octx;
	struct userfaultfd_fork_ctx *fctx;

	octx = vma->vm_userfaultfd_ctx.ctx;
	if (!octx || !(octx->features & UFFD_FEATURE_EVENT_FORK)) {
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
		vma->vm_flags &= ~(VM_UFFD_WP | VM_UFFD_MISSING);
		return 0;
	}

	list_for_each_entry(fctx, fcs, list)
		if (fctx->orig == octx) {
			ctx = fctx->new;
			break;
		}

	if (!ctx) {
		fctx = kmalloc(sizeof(*fctx), GFP_KERNEL);
		if (!fctx)
			return -ENOMEM;

		ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
		if (!ctx) {
			kfree(fctx);
			return -ENOMEM;
		}

		atomic_set(&ctx->refcount, 1);
		ctx->flags = octx->flags;
		ctx->state = UFFD_STATE_RUNNING;
		ctx->features = octx->features;
		ctx->released = false;
		ctx->mmap_changing = false;
		ctx->mm = vma->vm_mm;
		mmgrab(ctx->mm);

		userfaultfd_ctx_get(octx);
		WRITE_ONCE(octx->mmap_changing, true);
		fctx->orig = octx;
		fctx->new = ctx;
		list_add_tail(&fctx->list, fcs);
	}

	vma->vm_userfaultfd_ctx.ctx = ctx;
	return 0;
}

static void dup_fctx(struct userfaultfd_fork_ctx *fctx)
{
	struct userfaultfd_ctx *ctx = fctx->orig;
	struct userfaultfd_wait_queue ewq;

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_FORK;
	ewq.msg.arg.reserved.reserved1 = (unsigned long)fctx->new;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

void dup_userfaultfd_complete(struct list_head *fcs)
{
	struct userfaultfd_fork_ctx *fctx, *n;

	list_for_each_entry_safe(fctx, n, fcs, list) {
		dup_fctx(fctx);
		list_del(&fctx->list);
		kfree(fctx);
	}
}
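
/*
 * Illustrative userspace sketch (not kernel code): when
 * UFFD_FEATURE_EVENT_FORK is enabled, the monitor receives a
 * UFFD_EVENT_FORK message whose msg.arg.fork.ufd is a fresh
 * userfaultfd referring to the child's mm (installed by
 * resolve_userfault_fork() further down):
 *
 *	if (msg.event == UFFD_EVENT_FORK) {
 *		int child_uffd = msg.arg.fork.ufd;
 *		// start polling child_uffd alongside the parent fd
 *	}
 */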

void mremap_userfaultfd_prep(struct vm_area_struct *vma,
			     struct vm_userfaultfd_ctx *vm_ctx)
{
	struct userfaultfd_ctx *ctx;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (ctx && (ctx->features & UFFD_FEATURE_EVENT_REMAP)) {
		vm_ctx->ctx = ctx;
		userfaultfd_ctx_get(ctx);
		WRITE_ONCE(ctx->mmap_changing, true);
	}
}

void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *vm_ctx,
				 unsigned long from, unsigned long to,
				 unsigned long len)
{
	struct userfaultfd_ctx *ctx = vm_ctx->ctx;
	struct userfaultfd_wait_queue ewq;

	if (!ctx)
		return;

	if (to & ~PAGE_MASK) {
		userfaultfd_ctx_put(ctx);
		return;
	}

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMAP;
	ewq.msg.arg.remap.from = from;
	ewq.msg.arg.remap.to = to;
	ewq.msg.arg.remap.len = len;

	userfaultfd_event_wait_completion(ctx, &ewq);
}

bool userfaultfd_remove(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct userfaultfd_ctx *ctx;
	struct userfaultfd_wait_queue ewq;

	ctx = vma->vm_userfaultfd_ctx.ctx;
	if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE))
		return true;

	userfaultfd_ctx_get(ctx);
	WRITE_ONCE(ctx->mmap_changing, true);
	up_read(&mm->mmap_sem);

	msg_init(&ewq.msg);

	ewq.msg.event = UFFD_EVENT_REMOVE;
	ewq.msg.arg.remove.start = start;
	ewq.msg.arg.remove.end = end;

	userfaultfd_event_wait_completion(ctx, &ewq);

	return false;
}

static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps,
			  unsigned long start, unsigned long end)
{
	struct userfaultfd_unmap_ctx *unmap_ctx;

	list_for_each_entry(unmap_ctx, unmaps, list)
		if (unmap_ctx->ctx == ctx && unmap_ctx->start == start &&
		    unmap_ctx->end == end)
			return true;

	return false;
}

int userfaultfd_unmap_prep(struct vm_area_struct *vma,
			   unsigned long start, unsigned long end,
			   struct list_head *unmaps)
{
	for ( ; vma && vma->vm_start < end; vma = vma->vm_next) {
		struct userfaultfd_unmap_ctx *unmap_ctx;
		struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx;

		if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) ||
		    has_unmap_ctx(ctx, unmaps, start, end))
			continue;

		unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL);
		if (!unmap_ctx)
			return -ENOMEM;

		userfaultfd_ctx_get(ctx);
		WRITE_ONCE(ctx->mmap_changing, true);
		unmap_ctx->ctx = ctx;
		unmap_ctx->start = start;
		unmap_ctx->end = end;
		list_add_tail(&unmap_ctx->list, unmaps);
	}

	return 0;
}

void userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf)
{
	struct userfaultfd_unmap_ctx *ctx, *n;
	struct userfaultfd_wait_queue ewq;

	list_for_each_entry_safe(ctx, n, uf, list) {
		msg_init(&ewq.msg);

		ewq.msg.event = UFFD_EVENT_UNMAP;
		ewq.msg.arg.remove.start = ctx->start;
		ewq.msg.arg.remove.end = ctx->end;

		userfaultfd_event_wait_completion(ctx->ctx, &ewq);

		list_del(&ctx->list);
		kfree(ctx);
	}
}

static int userfaultfd_release(struct inode *inode, struct file *file)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev;
	/* len == 0 means wake all */
	struct userfaultfd_wake_range range = { .len = 0, };
	unsigned long new_flags;

	WRITE_ONCE(ctx->released, true);

	if (!mmget_not_zero(mm))
		goto wakeup;

	/*
	 * Flush page faults out of all CPUs. NOTE: all page faults
	 * must be retried without returning VM_FAULT_SIGBUS if
	 * userfaultfd_ctx_get() succeeds but vma->vma_userfault_ctx
	 * changes while handle_userfault released the mmap_sem. So
	 * it's critical that released is set to true (above), before
	 * taking the mmap_sem for writing.
	 */
	down_write(&mm->mmap_sem);
	prev = NULL;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		cond_resched();
		BUG_ON(!!vma->vm_userfaultfd_ctx.ctx ^
		       !!(vma->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));
		if (vma->vm_userfaultfd_ctx.ctx != ctx) {
			prev = vma;
			continue;
		}
		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
		prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
				 new_flags, vma->anon_vma,
				 vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX);
		if (prev)
			vma = prev;
		else
			prev = vma;
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
	}
	up_write(&mm->mmap_sem);
	mmput(mm);
wakeup:
	/*
	 * After no new page faults can wait on this fault_*wqh, flush
	 * the last page faults that may have been already waiting on
	 * the fault_*wqh.
	 */
	spin_lock(&ctx->fault_pending_wqh.lock);
	__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
	__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, &range);
	spin_unlock(&ctx->fault_pending_wqh.lock);

	/* Flush pending events that may still wait on event_wqh */
	wake_up_all(&ctx->event_wqh);

	wake_up_poll(&ctx->fd_wqh, EPOLLHUP);
	userfaultfd_ctx_put(ctx);
	return 0;
}

/* fault_pending_wqh.lock must be held by the caller */
static inline struct userfaultfd_wait_queue *find_userfault_in(
		wait_queue_head_t *wqh)
{
	wait_queue_entry_t *wq;
	struct userfaultfd_wait_queue *uwq;

	VM_BUG_ON(!spin_is_locked(&wqh->lock));

	uwq = NULL;
	if (!waitqueue_active(wqh))
		goto out;
	/* walk in reverse to provide FIFO behavior to read userfaults */
	wq = list_last_entry(&wqh->head, typeof(*wq), entry);
	uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
out:
	return uwq;
}

static inline struct userfaultfd_wait_queue *find_userfault(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->fault_pending_wqh);
}

static inline struct userfaultfd_wait_queue *find_userfault_evt(
		struct userfaultfd_ctx *ctx)
{
	return find_userfault_in(&ctx->event_wqh);
}

static __poll_t userfaultfd_poll(struct file *file, poll_table *wait)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	__poll_t ret;

	poll_wait(file, &ctx->fd_wqh, wait);

	switch (ctx->state) {
	case UFFD_STATE_WAIT_API:
		return EPOLLERR;
	case UFFD_STATE_RUNNING:
		/*
		 * poll() never guarantees that read won't block.
		 * userfaults can be woken before they're read().
		 */
		if (unlikely(!(file->f_flags & O_NONBLOCK)))
			return EPOLLERR;
		/*
		 * lockless access to see if there are pending faults
		 * __pollwait last action is the add_wait_queue but
		 * the spin_unlock would allow the waitqueue_active to
		 * pass above the actual list_add inside
		 * add_wait_queue critical section. So use a full
		 * memory barrier to serialize the list_add write of
		 * add_wait_queue() with the waitqueue_active read
		 * below.
		 */
		ret = 0;
		smp_mb();
		if (waitqueue_active(&ctx->fault_pending_wqh))
			ret = EPOLLIN;
		else if (waitqueue_active(&ctx->event_wqh))
			ret = EPOLLIN;

		return ret;
	default:
		WARN_ON_ONCE(1);
		return EPOLLERR;
	}
}
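
/*
 * Illustrative userspace sketch (not kernel code): the EPOLLERR
 * returned above for blocking fds is why the monitor must open the
 * userfaultfd with O_NONBLOCK if it intends to poll() it:
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	struct pollfd pfd = { .fd = uffd, .events = POLLIN };
 *	int n = poll(&pfd, 1, -1);	// POLLIN: a message can be read
 */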

static const struct file_operations userfaultfd_fops;

static int resolve_userfault_fork(struct userfaultfd_ctx *ctx,
				  struct userfaultfd_ctx *new,
				  struct uffd_msg *msg)
{
	int fd;

	fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, new,
			      O_RDWR | (new->flags & UFFD_SHARED_FCNTL_FLAGS));
	if (fd < 0)
		return fd;

	msg->arg.reserved.reserved1 = 0;
	msg->arg.fork.ufd = fd;
	return 0;
}

static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
				    struct uffd_msg *msg)
{
	ssize_t ret;
	DECLARE_WAITQUEUE(wait, current);
	struct userfaultfd_wait_queue *uwq;
	/*
	 * Handling a fork event requires sleeping operations, so
	 * we drop the event_wqh lock, then do these ops, then
	 * lock it back and wake up the waiter. While the lock is
	 * dropped the ewq may go away so we keep track of it
	 * carefully.
	 */
	LIST_HEAD(fork_event);
	struct userfaultfd_ctx *fork_nctx = NULL;

	/* always take the fd_wqh lock before the fault_pending_wqh lock */
	spin_lock(&ctx->fd_wqh.lock);
	__add_wait_queue(&ctx->fd_wqh, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock(&ctx->fault_pending_wqh.lock);
		uwq = find_userfault(ctx);
		if (uwq) {
			/*
			 * Use a seqcount to repeat the lockless check
			 * in wake_userfault() to avoid missing
			 * wakeups because during the refile both
			 * waitqueues could become empty if this is the
			 * only userfault.
			 */
			write_seqcount_begin(&ctx->refile_seq);

			/*
			 * The fault_pending_wqh.lock prevents the uwq
			 * from disappearing from under us.
			 *
			 * Refile this userfault from
			 * fault_pending_wqh to fault_wqh, it's not
			 * pending anymore after we read it.
			 *
			 * Use list_del() by hand (as
			 * userfaultfd_wake_function also uses
			 * list_del_init() by hand) to be sure nobody
			 * changes __remove_wait_queue() to use
			 * list_del_init() in turn breaking the
			 * !list_empty_careful() check in
			 * handle_userfault(). The uwq->wq.head list
			 * must never be empty at any time during the
			 * refile, or the waitqueue could disappear
			 * from under us. The "wait_queue_head_t"
			 * parameter of __remove_wait_queue() is unused
			 * anyway.
			 */
			list_del(&uwq->wq.entry);
			__add_wait_queue(&ctx->fault_wqh, &uwq->wq);

			write_seqcount_end(&ctx->refile_seq);

			/* careful to always initialize msg if ret == 0 */
			*msg = uwq->msg;
			spin_unlock(&ctx->fault_pending_wqh.lock);
			ret = 0;
			break;
		}
		spin_unlock(&ctx->fault_pending_wqh.lock);
		spin_lock(&ctx->event_wqh.lock);
		uwq = find_userfault_evt(ctx);
		if (uwq) {
			*msg = uwq->msg;

			if (uwq->msg.event == UFFD_EVENT_FORK) {
				fork_nctx = (struct userfaultfd_ctx *)
					(unsigned long)
					uwq->msg.arg.reserved.reserved1;
				list_move(&uwq->wq.entry, &fork_event);
				/*
				 * fork_nctx can be freed as soon as
				 * we drop the lock, unless we take a
				 * reference on it.
				 */
				userfaultfd_ctx_get(fork_nctx);
				spin_unlock(&ctx->event_wqh.lock);
				ret = 0;
				break;
			}

			userfaultfd_event_complete(ctx, uwq);
			spin_unlock(&ctx->event_wqh.lock);
			ret = 0;
			break;
		}
		spin_unlock(&ctx->event_wqh.lock);

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (no_wait) {
			ret = -EAGAIN;
			break;
		}
		spin_unlock(&ctx->fd_wqh.lock);
		schedule();
		spin_lock(&ctx->fd_wqh.lock);
	}
	__remove_wait_queue(&ctx->fd_wqh, &wait);
	__set_current_state(TASK_RUNNING);
	spin_unlock(&ctx->fd_wqh.lock);

	if (!ret && msg->event == UFFD_EVENT_FORK) {
		ret = resolve_userfault_fork(ctx, fork_nctx, msg);
		spin_lock(&ctx->event_wqh.lock);
		if (!list_empty(&fork_event)) {
			/*
			 * The fork thread didn't abort, so we can
			 * drop the temporary refcount.
			 */
			userfaultfd_ctx_put(fork_nctx);

			uwq = list_first_entry(&fork_event,
					       typeof(*uwq),
					       wq.entry);
			/*
			 * If fork_event list wasn't empty and in turn
			 * the event wasn't already released by fork
			 * (the event is allocated on fork kernel
			 * stack), put the event back to its place in
			 * the event_wq. fork_event head will be freed
			 * as soon as we return so the event cannot
			 * stay queued there no matter the current
			 * "ret" value.
			 */
			list_del(&uwq->wq.entry);
			__add_wait_queue(&ctx->event_wqh, &uwq->wq);

			/*
			 * Leave the event in the waitqueue and report
			 * error to userland if we failed to resolve
			 * the userfault fork.
			 */
			if (likely(!ret))
				userfaultfd_event_complete(ctx, uwq);
		} else {
			/*
			 * Here the fork thread aborted and the
			 * refcount from the fork thread on fork_nctx
			 * has already been released. We still hold
			 * the reference we took before releasing the
			 * lock above. If resolve_userfault_fork
			 * failed we have to drop it because the
			 * fork_nctx has to be freed in such case. If
			 * it succeeded we'll hold it because the new
			 * uffd references it.
			 */
			if (ret)
				userfaultfd_ctx_put(fork_nctx);
		}
		spin_unlock(&ctx->event_wqh.lock);
	}

	return ret;
}

static ssize_t userfaultfd_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	struct userfaultfd_ctx *ctx = file->private_data;
	ssize_t _ret, ret = 0;
	struct uffd_msg msg;
	int no_wait = file->f_flags & O_NONBLOCK;

	if (ctx->state == UFFD_STATE_WAIT_API)
		return -EINVAL;

	for (;;) {
		if (count < sizeof(msg))
			return ret ? ret : -EINVAL;
		_ret = userfaultfd_ctx_read(ctx, no_wait, &msg);
		if (_ret < 0)
			return ret ? ret : _ret;
		if (copy_to_user((__u64 __user *) buf, &msg, sizeof(msg)))
			return ret ? ret : -EFAULT;
		ret += sizeof(msg);
		buf += sizeof(msg);
		count -= sizeof(msg);
		/*
		 * Allow reading more than one fault at a time but only
		 * block if waiting for the very first one.
		 */
		no_wait = O_NONBLOCK;
	}
}
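
/*
 * Illustrative userspace sketch (not kernel code): the loop above lets
 * a single read() drain several messages, so a monitor can hand it a
 * buffer that is a multiple of sizeof(struct uffd_msg). handle_msg()
 * below is a hypothetical helper, not part of the kernel API:
 *
 *	struct uffd_msg msgs[16];
 *	ssize_t n = read(uffd, msgs, sizeof(msgs));
 *	for (ssize_t i = 0; n > 0 && i < n / (ssize_t)sizeof(msgs[0]); i++)
 *		handle_msg(&msgs[i]);
 */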

static void __wake_userfault(struct userfaultfd_ctx *ctx,
			     struct userfaultfd_wake_range *range)
{
	spin_lock(&ctx->fault_pending_wqh.lock);
	/* wake all in the range and autoremove */
	if (waitqueue_active(&ctx->fault_pending_wqh))
		__wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL,
				     range);
	if (waitqueue_active(&ctx->fault_wqh))
		__wake_up_locked_key(&ctx->fault_wqh, TASK_NORMAL, range);
	spin_unlock(&ctx->fault_pending_wqh.lock);
}

static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx,
					   struct userfaultfd_wake_range *range)
{
	unsigned seq;
	bool need_wakeup;

	/*
	 * To be sure waitqueue_active() is not reordered by the CPU
	 * before the pagetable update, use an explicit SMP memory
	 * barrier here. PT lock release or up_read(mmap_sem) still
	 * have release semantics that can allow the
	 * waitqueue_active() to be reordered before the pte update.
	 */
	smp_mb();

	/*
	 * Use waitqueue_active because it's very frequent to
	 * change the address space atomically even if there are no
	 * userfaults yet. So we take the spinlock only when we're
	 * sure we have userfaults to wake.
	 */
	do {
		seq = read_seqcount_begin(&ctx->refile_seq);
		need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
			waitqueue_active(&ctx->fault_wqh);
		cond_resched();
	} while (read_seqcount_retry(&ctx->refile_seq, seq));
	if (need_wakeup)
		__wake_userfault(ctx, range);
}

static __always_inline int validate_range(struct mm_struct *mm,
					  __u64 start, __u64 len)
{
	__u64 task_size = mm->task_size;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	if (len & ~PAGE_MASK)
		return -EINVAL;
	if (!len)
		return -EINVAL;
	if (start < mmap_min_addr)
		return -EINVAL;
	if (start >= task_size)
		return -EINVAL;
	if (len > task_size - start)
		return -EINVAL;
	return 0;
}
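
/*
 * Worked example for validate_range() (assuming 4 KiB pages): with
 * start = 0x1000 and len = 0x3000 every check passes, while
 * start = 0x1800 fails the "start & ~PAGE_MASK" alignment test, and
 * start = task_size - 0x1000 with len = 0x2000 fails the final
 * overflow-safe "len > task_size - start" bounds test.
 */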

static inline bool vma_can_userfault(struct vm_area_struct *vma)
{
	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
		vma_is_shmem(vma);
}

static int userfaultfd_register(struct userfaultfd_ctx *ctx,
				unsigned long arg)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev, *cur;
	int ret;
	struct uffdio_register uffdio_register;
	struct uffdio_register __user *user_uffdio_register;
	unsigned long vm_flags, new_flags;
	bool found;
	bool basic_ioctls;
	unsigned long start, end, vma_end;

	user_uffdio_register = (struct uffdio_register __user *) arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_register, user_uffdio_register,
			   sizeof(uffdio_register)-sizeof(__u64)))
		goto out;

	ret = -EINVAL;
	if (!uffdio_register.mode)
		goto out;
	if (uffdio_register.mode & ~(UFFDIO_REGISTER_MODE_MISSING|
				     UFFDIO_REGISTER_MODE_WP))
		goto out;
	vm_flags = 0;
	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_MISSING)
		vm_flags |= VM_UFFD_MISSING;
	if (uffdio_register.mode & UFFDIO_REGISTER_MODE_WP) {
		vm_flags |= VM_UFFD_WP;
		/*
		 * FIXME: remove the below error constraint by
		 * implementing the wprotect tracking mode.
		 */
		ret = -EINVAL;
		goto out;
	}

	ret = validate_range(mm, uffdio_register.range.start,
			     uffdio_register.range.len);
	if (ret)
		goto out;

	start = uffdio_register.range.start;
	end = start + uffdio_register.range.len;

	ret = -ENOMEM;
	if (!mmget_not_zero(mm))
		goto out;

	down_write(&mm->mmap_sem);
	vma = find_vma_prev(mm, start, &prev);
	if (!vma)
		goto out_unlock;

	/* check that there's at least one vma in the range */
	ret = -EINVAL;
	if (vma->vm_start >= end)
		goto out_unlock;

	/*
	 * If the first vma contains huge pages, make sure start address
	 * is aligned to huge page size.
	 */
	if (is_vm_hugetlb_page(vma)) {
		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);

		if (start & (vma_hpagesize - 1))
			goto out_unlock;
	}

	/*
	 * Search for incompatible vmas.
	 */
	found = false;
	basic_ioctls = false;
	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
		cond_resched();

		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));

		/* check for incompatible vmas */
		ret = -EINVAL;
		if (!vma_can_userfault(cur))
			goto out_unlock;

		/*
		 * If this vma contains the ending address and is a
		 * hugepage vma, check the huge page alignment.
		 */
		if (is_vm_hugetlb_page(cur) && end <= cur->vm_end &&
		    end > cur->vm_start) {
			unsigned long vma_hpagesize = vma_kernel_pagesize(cur);

			ret = -EINVAL;

			if (end & (vma_hpagesize - 1))
				goto out_unlock;
		}

		/*
		 * Check that this vma isn't already owned by a
		 * different userfaultfd. We can't allow more than one
		 * userfaultfd to own a single vma simultaneously or we
		 * wouldn't know which one to deliver the userfaults to.
		 */
		ret = -EBUSY;
		if (cur->vm_userfaultfd_ctx.ctx &&
		    cur->vm_userfaultfd_ctx.ctx != ctx)
			goto out_unlock;

		/*
		 * Note vmas containing huge pages
		 */
		if (is_vm_hugetlb_page(cur))
			basic_ioctls = true;

		found = true;
	}
	BUG_ON(!found);

	if (vma->vm_start < start)
		prev = vma;

	ret = 0;
	do {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma));
		BUG_ON(vma->vm_userfaultfd_ctx.ctx &&
		       vma->vm_userfaultfd_ctx.ctx != ctx);

		/*
		 * Nothing to do: this vma is already registered into this
		 * userfaultfd and with the right tracking mode too.
		 */
		if (vma->vm_userfaultfd_ctx.ctx == ctx &&
		    (vma->vm_flags & vm_flags) == vm_flags)
			goto skip;

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		new_flags = (vma->vm_flags & ~vm_flags) | vm_flags;
		prev = vma_merge(mm, prev, start, vma_end, new_flags,
				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 ((struct vm_userfaultfd_ctx){ ctx }));
		if (prev) {
			vma = prev;
			goto next;
		}
		if (vma->vm_start < start) {
			ret = split_vma(mm, vma, start, 1);
			if (ret)
				break;
		}
		if (vma->vm_end > end) {
			ret = split_vma(mm, vma, end, 0);
			if (ret)
				break;
		}
	next:
		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx.ctx = ctx;

	skip:
		prev = vma;
		start = vma->vm_end;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
out_unlock:
	up_write(&mm->mmap_sem);
	mmput(mm);
	if (!ret) {
		/*
		 * Now that we scanned all vmas we can already tell
		 * userland which ioctl methods are guaranteed to
		 * succeed on this range.
		 */
		if (put_user(basic_ioctls ? UFFD_API_RANGE_IOCTLS_BASIC :
			     UFFD_API_RANGE_IOCTLS,
			     &user_uffdio_register->ioctls))
			ret = -EFAULT;
	}
out:
	return ret;
}
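
/*
 * Illustrative userspace sketch (not kernel code): registering an
 * anonymous mapping for missing-page tracking with the ioctl handled
 * above. On success, uffdio_register.ioctls reports which range ioctls
 * (e.g. UFFDIO_COPY) are guaranteed to work on the range. addr and
 * length come from a prior mmap(); error handling omitted:
 *
 *	struct uffdio_register reg = {
 *		.range = { .start = (unsigned long)addr, .len = length },
 *		.mode  = UFFDIO_REGISTER_MODE_MISSING,
 *	};
 *	if (ioctl(uffd, UFFDIO_REGISTER, &reg) == -1)
 *		abort();
 */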

static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
				  unsigned long arg)
{
	struct mm_struct *mm = ctx->mm;
	struct vm_area_struct *vma, *prev, *cur;
	int ret;
	struct uffdio_range uffdio_unregister;
	unsigned long new_flags;
	bool found;
	unsigned long start, end, vma_end;
	const void __user *buf = (void __user *)arg;

	ret = -EFAULT;
	if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
		goto out;

	ret = validate_range(mm, uffdio_unregister.start,
			     uffdio_unregister.len);
	if (ret)
		goto out;

	start = uffdio_unregister.start;
	end = start + uffdio_unregister.len;

	ret = -ENOMEM;
	if (!mmget_not_zero(mm))
		goto out;

	down_write(&mm->mmap_sem);
	vma = find_vma_prev(mm, start, &prev);
	if (!vma)
		goto out_unlock;

	/* check that there's at least one vma in the range */
	ret = -EINVAL;
	if (vma->vm_start >= end)
		goto out_unlock;

	/*
	 * If the first vma contains huge pages, make sure start address
	 * is aligned to huge page size.
	 */
	if (is_vm_hugetlb_page(vma)) {
		unsigned long vma_hpagesize = vma_kernel_pagesize(vma);

		if (start & (vma_hpagesize - 1))
			goto out_unlock;
	}

	/*
	 * Search for incompatible vmas.
	 */
	found = false;
	ret = -EINVAL;
	for (cur = vma; cur && cur->vm_start < end; cur = cur->vm_next) {
		cond_resched();

		BUG_ON(!!cur->vm_userfaultfd_ctx.ctx ^
		       !!(cur->vm_flags & (VM_UFFD_MISSING | VM_UFFD_WP)));

		/*
		 * Check for incompatible vmas. This is not strictly
		 * required here, as incompatible vmas cannot have a
		 * userfaultfd_ctx registered on them, but it provides
		 * stricter behavior to notice unregistration errors.
		 */
		if (!vma_can_userfault(cur))
			goto out_unlock;

		found = true;
	}
	BUG_ON(!found);

	if (vma->vm_start < start)
		prev = vma;

	ret = 0;
	do {
		cond_resched();

		BUG_ON(!vma_can_userfault(vma));

		/*
		 * Nothing to do: this vma is not registered with any
		 * userfaultfd, so there is nothing to unregister.
		 */
		if (!vma->vm_userfaultfd_ctx.ctx)
			goto skip;

		if (vma->vm_start > start)
			start = vma->vm_start;
		vma_end = min(end, vma->vm_end);

		if (userfaultfd_missing(vma)) {
			/*
			 * Wake any concurrent pending userfault while
			 * we unregister, so they will not hang
			 * permanently and it avoids userland having to
			 * call UFFDIO_WAKE explicitly.
			 */
			struct userfaultfd_wake_range range;
			range.start = start;
			range.len = vma_end - start;
			wake_userfault(vma->vm_userfaultfd_ctx.ctx, &range);
		}

		new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
		prev = vma_merge(mm, prev, start, vma_end, new_flags,
				 vma->anon_vma, vma->vm_file, vma->vm_pgoff,
				 vma_policy(vma),
				 NULL_VM_UFFD_CTX);
		if (prev) {
			vma = prev;
			goto next;
		}
		if (vma->vm_start < start) {
			ret = split_vma(mm, vma, start, 1);
			if (ret)
				break;
		}
		if (vma->vm_end > end) {
			ret = split_vma(mm, vma, end, 0);
			if (ret)
				break;
		}
	next:
		/*
		 * In the vma_merge() successful mprotect-like case 8:
		 * the next vma was merged into the current one and
		 * the current one has not been updated yet.
		 */
		vma->vm_flags = new_flags;
		vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;

	skip:
		prev = vma;
		start = vma->vm_end;
		vma = vma->vm_next;
	} while (vma && vma->vm_start < end);
out_unlock:
	up_write(&mm->mmap_sem);
	mmput(mm);
out:
	return ret;
}
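
/*
 * Illustrative userspace sketch (not kernel code): unregistering takes
 * a plain uffdio_range rather than a uffdio_register:
 *
 *	struct uffdio_range range = {
 *		.start = (unsigned long)addr,
 *		.len = length,
 *	};
 *	ioctl(uffd, UFFDIO_UNREGISTER, &range);
 */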

/*
 * userfaultfd_wake may be used in combination with the
 * UFFDIO_*_MODE_DONTWAKE to wake up userfaults in batches.
 */
static int userfaultfd_wake(struct userfaultfd_ctx *ctx,
                            unsigned long arg)
{
        int ret;
        struct uffdio_range uffdio_wake;
        struct userfaultfd_wake_range range;
        const void __user *buf = (void __user *)arg;

        ret = -EFAULT;
        if (copy_from_user(&uffdio_wake, buf, sizeof(uffdio_wake)))
                goto out;

        ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len);
        if (ret)
                goto out;

        range.start = uffdio_wake.start;
        range.len = uffdio_wake.len;

        /*
         * len == 0 means wake all and we don't want to wake all here,
         * so check it again to be sure.
         */
        VM_BUG_ON(!range.len);

        wake_userfault(ctx, &range);
        ret = 0;

out:
        return ret;
}
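
/*
 * Usage sketch (illustrative only): the batching pattern this ioctl is
 * meant for. Resolve several faults with the *_MODE_DONTWAKE modes and
 * then wake the whole region once. uffd, region_base and region_len are
 * hypothetical names supplied by the caller; len must be non-zero, as
 * the VM_BUG_ON above enforces.
 *
 *	struct uffdio_range range = {
 *		.start = (unsigned long) region_base,
 *		.len = region_len,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_WAKE, &range) == -1)
 *		err(1, "UFFDIO_WAKE");
 */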

static int userfaultfd_copy(struct userfaultfd_ctx *ctx,
                            unsigned long arg)
{
        __s64 ret;
        struct uffdio_copy uffdio_copy;
        struct uffdio_copy __user *user_uffdio_copy;
        struct userfaultfd_wake_range range;

        user_uffdio_copy = (struct uffdio_copy __user *) arg;

        ret = -EAGAIN;
        if (READ_ONCE(ctx->mmap_changing))
                goto out;

        ret = -EFAULT;
        if (copy_from_user(&uffdio_copy, user_uffdio_copy,
                           /* don't copy "copy" last field */
                           sizeof(uffdio_copy)-sizeof(__s64)))
                goto out;

        ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len);
        if (ret)
                goto out;
        /*
         * double check for wraparound just in case. copy_from_user()
         * will later check that uffdio_copy.src + uffdio_copy.len
         * fits in the userland range.
         */
        ret = -EINVAL;
        if (uffdio_copy.src + uffdio_copy.len <= uffdio_copy.src)
                goto out;
        if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE)
                goto out;
        if (mmget_not_zero(ctx->mm)) {
                ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src,
                                   uffdio_copy.len, &ctx->mmap_changing);
                mmput(ctx->mm);
        } else {
                return -ESRCH;
        }
        if (unlikely(put_user(ret, &user_uffdio_copy->copy)))
                return -EFAULT;
        if (ret < 0)
                goto out;
        BUG_ON(!ret);
        /* len == 0 would wake all */
        range.len = ret;
        if (!(uffdio_copy.mode & UFFDIO_COPY_MODE_DONTWAKE)) {
                range.start = uffdio_copy.dst;
                wake_userfault(ctx, &range);
        }
        ret = range.len == uffdio_copy.len ? 0 : -EAGAIN;
out:
        return ret;
}
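
/*
 * Usage sketch (illustrative only): resolving a missing-page fault from
 * a userland handler with UFFDIO_COPY. uffd, msg (a struct uffd_msg
 * obtained from read()), page_buf and page_size are assumed to come
 * from the caller's fault-handling loop.
 *
 *	struct uffdio_copy copy = {
 *		.dst = msg.arg.pagefault.address & ~(page_size - 1),
 *		.src = (unsigned long) page_buf,
 *		.len = page_size,
 *		.mode = 0,	// wake the faulting thread right away
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1 && errno != EAGAIN)
 *		err(1, "UFFDIO_COPY");
 *	// copy.copy reports the bytes installed (or a negative error),
 *	// so a partial copy interrupted by -EAGAIN can be retried.
 */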

static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx,
                                unsigned long arg)
{
        __s64 ret;
        struct uffdio_zeropage uffdio_zeropage;
        struct uffdio_zeropage __user *user_uffdio_zeropage;
        struct userfaultfd_wake_range range;

        user_uffdio_zeropage = (struct uffdio_zeropage __user *) arg;

        ret = -EAGAIN;
        if (READ_ONCE(ctx->mmap_changing))
                goto out;

        ret = -EFAULT;
        if (copy_from_user(&uffdio_zeropage, user_uffdio_zeropage,
                           /* don't copy "zeropage" last field */
                           sizeof(uffdio_zeropage)-sizeof(__s64)))
                goto out;

        ret = validate_range(ctx->mm, uffdio_zeropage.range.start,
                             uffdio_zeropage.range.len);
        if (ret)
                goto out;
        ret = -EINVAL;
        if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE)
                goto out;

        if (mmget_not_zero(ctx->mm)) {
                ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start,
                                     uffdio_zeropage.range.len,
                                     &ctx->mmap_changing);
                mmput(ctx->mm);
        } else {
                return -ESRCH;
        }
        if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage)))
                return -EFAULT;
        if (ret < 0)
                goto out;
        /* len == 0 would wake all */
        BUG_ON(!ret);
        range.len = ret;
        if (!(uffdio_zeropage.mode & UFFDIO_ZEROPAGE_MODE_DONTWAKE)) {
                range.start = uffdio_zeropage.range.start;
                wake_userfault(ctx, &range);
        }
        ret = range.len == uffdio_zeropage.range.len ? 0 : -EAGAIN;
out:
        return ret;
}
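
/*
 * Usage sketch (illustrative only): mapping the zero page instead of
 * copying data, useful when the faulting range is known to be zero.
 * uffd, fault_addr and page_size are hypothetical caller-provided names.
 *
 *	struct uffdio_zeropage zp = {
 *		.range.start = fault_addr & ~(page_size - 1),
 *		.range.len = page_size,
 *		.mode = 0,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_ZEROPAGE, &zp) == -1)
 *		err(1, "UFFDIO_ZEROPAGE");
 */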

static inline unsigned int uffd_ctx_features(__u64 user_features)
{
        /*
         * For the current set of features the bits just coincide
         */
        return (unsigned int)user_features;
}

/*
 * userland asks for a certain API version and we return which bits
 * and ioctl commands are implemented in this kernel for that API
 * version, or -EINVAL if it is unknown.
 */
static int userfaultfd_api(struct userfaultfd_ctx *ctx,
                           unsigned long arg)
{
        struct uffdio_api uffdio_api;
        void __user *buf = (void __user *)arg;
        int ret;
        __u64 features;

        ret = -EINVAL;
        if (ctx->state != UFFD_STATE_WAIT_API)
                goto out;
        ret = -EFAULT;
        if (copy_from_user(&uffdio_api, buf, sizeof(uffdio_api)))
                goto out;
        features = uffdio_api.features;
        if (uffdio_api.api != UFFD_API || (features & ~UFFD_API_FEATURES)) {
                memset(&uffdio_api, 0, sizeof(uffdio_api));
                if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
                        goto out;
                ret = -EINVAL;
                goto out;
        }
        /* report all available features and ioctls to userland */
        uffdio_api.features = UFFD_API_FEATURES;
        uffdio_api.ioctls = UFFD_API_IOCTLS;
        ret = -EFAULT;
        if (copy_to_user(buf, &uffdio_api, sizeof(uffdio_api)))
                goto out;
        ctx->state = UFFD_STATE_RUNNING;
        /* only enable the requested features for this uffd context */
        ctx->features = uffd_ctx_features(features);
        ret = 0;
out:
        return ret;
}
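
/*
 * Usage sketch (illustrative only): the handshake that moves the context
 * from UFFD_STATE_WAIT_API to UFFD_STATE_RUNNING. Requesting features = 0
 * asks for no optional features; on return the kernel fills in what it
 * supports.
 *
 *	struct uffdio_api api = {
 *		.api = UFFD_API,
 *		.features = 0,
 *	};
 *
 *	if (ioctl(uffd, UFFDIO_API, &api) == -1)
 *		err(1, "UFFDIO_API");
 *	// api.features and api.ioctls now describe kernel support
 */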

static long userfaultfd_ioctl(struct file *file, unsigned cmd,
                              unsigned long arg)
{
        int ret = -EINVAL;
        struct userfaultfd_ctx *ctx = file->private_data;

        if (cmd != UFFDIO_API && ctx->state == UFFD_STATE_WAIT_API)
                return -EINVAL;

        switch(cmd) {
        case UFFDIO_API:
                ret = userfaultfd_api(ctx, arg);
                break;
        case UFFDIO_REGISTER:
                ret = userfaultfd_register(ctx, arg);
                break;
        case UFFDIO_UNREGISTER:
                ret = userfaultfd_unregister(ctx, arg);
                break;
        case UFFDIO_WAKE:
                ret = userfaultfd_wake(ctx, arg);
                break;
        case UFFDIO_COPY:
                ret = userfaultfd_copy(ctx, arg);
                break;
        case UFFDIO_ZEROPAGE:
                ret = userfaultfd_zeropage(ctx, arg);
                break;
        }
        return ret;
}

#ifdef CONFIG_PROC_FS
static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
{
        struct userfaultfd_ctx *ctx = f->private_data;
        wait_queue_entry_t *wq;
        struct userfaultfd_wait_queue *uwq;
        unsigned long pending = 0, total = 0;

        spin_lock(&ctx->fault_pending_wqh.lock);
        list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
                uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
                pending++;
                total++;
        }
        list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
                uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
                total++;
        }
        spin_unlock(&ctx->fault_pending_wqh.lock);

        /*
         * If more protocols are added in the future, they will all be
         * shown separated by a space, like this:
         * protocols: aa:... bb:...
         */
        seq_printf(m, "pending:\t%lu\ntotal:\t%lu\nAPI:\t%Lx:%x:%Lx\n",
                   pending, total, UFFD_API, ctx->features,
                   UFFD_API_IOCTLS|UFFD_API_RANGE_IOCTLS);
}
#endif
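
/*
 * Illustrative fdinfo output (the counter values are examples only; the
 * API line is UFFD_API, the enabled features and the supported ioctl
 * bitmask in hex, matching the seq_printf() format above):
 *
 *	$ cat /proc/<pid>/fdinfo/<uffd>
 *	pending:	1
 *	total:	2
 *	API:	aa:0:<ioctl bitmask>
 */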

static const struct file_operations userfaultfd_fops = {
#ifdef CONFIG_PROC_FS
        .show_fdinfo    = userfaultfd_show_fdinfo,
#endif
        .release        = userfaultfd_release,
        .poll           = userfaultfd_poll,
        .read           = userfaultfd_read,
        .unlocked_ioctl = userfaultfd_ioctl,
        .compat_ioctl   = userfaultfd_ioctl,
        .llseek         = noop_llseek,
};

static void init_once_userfaultfd_ctx(void *mem)
{
        struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem;

        init_waitqueue_head(&ctx->fault_pending_wqh);
        init_waitqueue_head(&ctx->fault_wqh);
        init_waitqueue_head(&ctx->event_wqh);
        init_waitqueue_head(&ctx->fd_wqh);
        seqcount_init(&ctx->refile_seq);
}

SYSCALL_DEFINE1(userfaultfd, int, flags)
{
        struct userfaultfd_ctx *ctx;
        int fd;

        BUG_ON(!current->mm);

        /* Check the UFFD_* constants for consistency. */
        BUILD_BUG_ON(UFFD_CLOEXEC != O_CLOEXEC);
        BUILD_BUG_ON(UFFD_NONBLOCK != O_NONBLOCK);

        if (flags & ~UFFD_SHARED_FCNTL_FLAGS)
                return -EINVAL;

        ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        atomic_set(&ctx->refcount, 1);
        ctx->flags = flags;
        ctx->features = 0;
        ctx->state = UFFD_STATE_WAIT_API;
        ctx->released = false;
        ctx->mmap_changing = false;
        ctx->mm = current->mm;
        /* prevent the mm struct from being freed */
        mmgrab(ctx->mm);

        fd = anon_inode_getfd("[userfaultfd]", &userfaultfd_fops, ctx,
                              O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS));
        if (fd < 0) {
                mmdrop(ctx->mm);
                kmem_cache_free(userfaultfd_ctx_cachep, ctx);
        }
        return fd;
}
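
/*
 * Usage sketch (illustrative only): creating a userfaultfd from userland.
 * No libc wrapper is assumed here, so the raw syscall is used; the
 * BUILD_BUG_ONs above guarantee the O_* flags can be passed through
 * directly as UFFD_CLOEXEC/UFFD_NONBLOCK.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 *	if (uffd == -1)
 *		err(1, "userfaultfd");
 */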

static int __init userfaultfd_init(void)
{
        userfaultfd_ctx_cachep = kmem_cache_create("userfaultfd_ctx_cache",
                                                sizeof(struct userfaultfd_ctx),
                                                0,
                                                SLAB_HWCACHE_ALIGN|SLAB_PANIC,
                                                init_once_userfaultfd_ctx);
        return 0;
}
__initcall(userfaultfd_init);