huge_mm.h

#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct fault_env *fe);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct fault_env *fe, pmd_t orig_pmd);
extern int do_huge_pmd_wp_page(struct fault_env *fe, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern int madvise_free_huge_pmd(struct mmu_gather *tlb,
				 struct vm_area_struct *vma,
				 pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *, unsigned long addr, pmd_t *,
			pfn_t pfn, bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
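
/*
 * The enum above gives the bit numbers used in transparent_hugepage_flags.
 * As a rough map to the usual THP sysfs interface (a summary, not anything
 * spelled out in this header): TRANSPARENT_HUGEPAGE_FLAG and
 * TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG correspond to "always" and "madvise"
 * in /sys/kernel/mm/transparent_hugepage/enabled, the DEFRAG_* bits to the
 * matching "defrag" settings, and USE_ZERO_PAGE to "use_zero_page".
 */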
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);

#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
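
/*
 * Worked example (assuming x86-64 defaults, which are not defined in this
 * header): with PAGE_SHIFT == 12 and PMD_SHIFT == 21, HPAGE_PMD_SIZE is
 * 1UL << 21 = 2 MiB, HPAGE_PMD_ORDER is 21 - 12 = 9, and HPAGE_PMD_NR is
 * 1 << 9 = 512 base pages per PMD-mapped huge page.
 */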
extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

#define transparent_hugepage_enabled(__vma)				\
	((transparent_hugepage_flags &					\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||				\
	  (transparent_hugepage_flags &					\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&			\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&			\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&			\
	 !is_vma_temporary_stack(__vma))
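
/*
 * Reading the predicate above: huge pages are considered for a VMA when
 * THP is enabled system-wide, or when it is enabled for madvise()d regions
 * and the VMA carries VM_HUGEPAGE -- and in either case only if the VMA is
 * not marked VM_NOHUGEPAGE and is not a temporary stack set up during exec.
 */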
#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow() 0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
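
/*
 * split_huge_page() is the common case of split_huge_page_to_list(): the
 * compound page is split into base pages and, with list == NULL, the tail
 * pages go back to the LRU rather than onto a caller-supplied list (a
 * reading of the NULL default; the behaviour lives in mm/huge_memory.c,
 * not in this header).
 */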
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)
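
/*
 * The wrapper above evaluates __pmd once into ____pmd and only calls
 * __split_huge_pmd() when the entry currently maps a transparent huge
 * page or a devmap huge page, so callers can invoke it unconditionally
 * on any pmd.
 */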
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
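
/*
 * Sketch of typical use (illustrative only, not code from this header):
 * a non-NULL return is the page-table lock, already taken, for a pmd that
 * was huge at the time of the check; the caller must drop it.
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd ...
 *		spin_unlock(ptl);
 *	}
 */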
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}
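
/* Number of base pages behind @page: HPAGE_PMD_NR for a THP, else 1. */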
extern int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

struct page *get_huge_zero_page(void);
void put_huge_zero_page(void);
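
/*
 * The huge zero page is a single shared, read-only huge page used to back
 * zero-filled read faults.  get_huge_zero_page()/put_huge_zero_page() pair
 * up as a reference count on it, and the ACCESS_ONCE() in
 * is_huge_zero_page() keeps the pointer from being reloaded around a
 * concurrent update.
 */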
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
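
/*
 * With THP compiled out, any use of the HPAGE_PMD_* constants is turned
 * into a build error by BUILD_BUG(), while the stubs below let generic mm
 * callers compile without scattering #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 * through their code.
 */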
#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

#define transparent_hugepage_flags 0UL
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct fault_env *fe, pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline void put_huge_zero_page(void)
{
	BUILD_BUG();
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */