/* include/linux/sched/mm.h */
#ifndef _LINUX_SCHED_MM_H
#define _LINUX_SCHED_MM_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/gfp.h>

/*
 * Routines for handling mm_structs
 */
extern struct mm_struct *mm_alloc(void);
/**
 * mmgrab() - Pin a &struct mm_struct.
 * @mm: The &struct mm_struct to pin.
 *
 * Make sure that @mm will not get freed even after the owning task
 * exits. This doesn't guarantee that the associated address space
 * will still exist later on and mmget_not_zero() has to be used before
 * accessing it.
 *
 * This is a preferred way to pin @mm for a longer/unbounded amount
 * of time.
 *
 * Use mmdrop() to release the reference acquired by mmgrab().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmgrab(struct mm_struct *mm)
{
	/* Take an mm_count reference: keeps the mm_struct itself alive. */
	atomic_inc(&mm->mm_count);
}
/* mmdrop drops the mm and the page tables */
extern void __mmdrop(struct mm_struct *);

/**
 * mmdrop() - Release a reference acquired by mmgrab().
 * @mm: The &struct mm_struct to release.
 *
 * Drops one mm_count reference; when the count reaches zero the
 * mm_struct and its page tables are freed via __mmdrop(). The common
 * case (count still positive) stays inline and cheap.
 */
static inline void mmdrop(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
		__mmdrop(mm);
}
  38. static inline void mmdrop_async_fn(struct work_struct *work)
  39. {
  40. struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
  41. __mmdrop(mm);
  42. }
/*
 * mmdrop_async() - Drop an mm_count reference like mmdrop(), but defer
 * the final __mmdrop() to a workqueue (via mmdrop_async_fn) instead of
 * running it in the caller's context.
 */
static inline void mmdrop_async(struct mm_struct *mm)
{
	if (unlikely(atomic_dec_and_test(&mm->mm_count))) {
		/* Last reference: queue the actual free. */
		INIT_WORK(&mm->async_put_work, mmdrop_async_fn);
		schedule_work(&mm->async_put_work);
	}
}
/**
 * mmget() - Pin the address space associated with a &struct mm_struct.
 * @mm: The address space to pin.
 *
 * Make sure that the address space of the given &struct mm_struct doesn't
 * go away. This does not protect against parts of the address space being
 * modified or freed, however.
 *
 * Never use this function to pin this address space for an
 * unbounded/indefinite amount of time.
 *
 * Use mmput() to release the reference acquired by mmget().
 *
 * See also <Documentation/vm/active_mm.txt> for an in-depth explanation
 * of &mm_struct.mm_count vs &mm_struct.mm_users.
 */
static inline void mmget(struct mm_struct *mm)
{
	/* Take an mm_users reference: keeps the address space alive. */
	atomic_inc(&mm->mm_users);
}
/**
 * mmget_not_zero() - Pin the address space only if it is still live.
 * @mm: The address space to pin.
 *
 * Like mmget(), but fails instead of pinning when ->mm_users has
 * already dropped to zero (i.e. the address space is being torn down).
 *
 * Return: true if a reference was acquired, false otherwise.
 */
static inline bool mmget_not_zero(struct mm_struct *mm)
{
	return atomic_inc_not_zero(&mm->mm_users);
}
/* mmput gets rid of the mappings and all user-space */
extern void mmput(struct mm_struct *);
#ifdef CONFIG_MMU
/* same as above but performs the slow path from the async context. Can
 * be called from the atomic context as well
 */
extern void mmput_async(struct mm_struct *);
#endif

/* Grab a reference to a task's mm, if it is not already going away */
extern struct mm_struct *get_task_mm(struct task_struct *task);

/*
 * Grab a reference to a task's mm, if it is not already going away
 * and ptrace_may_access with the mode parameter passed to it
 * succeeds.
 */
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);

/* Remove the current tasks stale references to the old mm_struct */
extern void mm_release(struct task_struct *, struct mm_struct *);

#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
#else
static inline void mm_update_next_owner(struct mm_struct *mm)
{
}
#endif /* CONFIG_MEMCG */
#ifdef CONFIG_MMU
extern void arch_pick_mmap_layout(struct mm_struct *mm);
extern unsigned long
arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
		       unsigned long, unsigned long);
extern unsigned long
arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
			       unsigned long len, unsigned long pgoff,
			       unsigned long flags);
#else
static inline void arch_pick_mmap_layout(struct mm_struct *mm) {}
#endif
/*
 * in_vfork() - is @tsk a vfork() child still borrowing its parent's mm?
 */
static inline bool in_vfork(struct task_struct *tsk)
{
	bool ret;

	/*
	 * need RCU to access ->real_parent if CLONE_VM was used along with
	 * CLONE_PARENT.
	 *
	 * We check real_parent->mm == tsk->mm because CLONE_VFORK does not
	 * imply CLONE_VM
	 *
	 * CLONE_VFORK can be used with CLONE_PARENT/CLONE_THREAD and thus
	 * ->real_parent is not necessarily the task doing vfork(), so in
	 * theory we can't rely on task_lock() if we want to dereference it.
	 *
	 * And in this case we can't trust the real_parent->mm == tsk->mm
	 * check, it can be false negative. But we do not care, if init or
	 * another oom-unkillable task does this it should blame itself.
	 */
	rcu_read_lock();
	ret = tsk->vfork_done && tsk->real_parent->mm == tsk->mm;
	rcu_read_unlock();

	return ret;
}
/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
 * __GFP_FS is also cleared as it implies __GFP_IO.
 */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
	/* Filter the caller's gfp mask through the task's NOIO restriction. */
	if (unlikely(current->flags & PF_MEMALLOC_NOIO))
		flags &= ~(__GFP_IO | __GFP_FS);
	return flags;
}
/*
 * memalloc_noio_save() - Enter a no-I/O allocation scope for current.
 *
 * Sets PF_MEMALLOC_NOIO on current and returns the flag's previous
 * state, so nested scopes restore correctly when the returned value is
 * later passed to memalloc_noio_restore().
 */
static inline unsigned int memalloc_noio_save(void)
{
	unsigned int flags = current->flags & PF_MEMALLOC_NOIO;
	current->flags |= PF_MEMALLOC_NOIO;
	return flags;
}
/*
 * memalloc_noio_restore() - Leave a no-I/O allocation scope.
 * @flags: the value previously returned by memalloc_noio_save().
 *
 * Restores PF_MEMALLOC_NOIO to its saved state (only that bit is
 * touched); other task flags are preserved.
 */
static inline void memalloc_noio_restore(unsigned int flags)
{
	current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
#endif /* _LINUX_SCHED_MM_H */