huge_mm.h

#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
			 struct vm_area_struct *vma);
extern void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
extern int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
			 pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
			 struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
extern void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
					  unsigned long addr,
					  pmd_t *pmd,
					  unsigned int flags);
extern bool madvise_free_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr, unsigned long next);
extern int zap_huge_pmd(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pmd_t *pmd, unsigned long addr);
extern int zap_huge_pud(struct mmu_gather *tlb,
			struct vm_area_struct *vma,
			pud_t *pud, unsigned long addr);
extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
			 unsigned long new_addr, unsigned long old_end,
			 pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, pgprot_t newprot,
			int prot_numa);
int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
			pmd_t *pmd, pfn_t pfn, bool write);
int vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
			pud_t *pud, pfn_t pfn, bool write);
enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};

struct kobject;
struct kobj_attribute;

extern ssize_t single_hugepage_flag_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t count,
				enum transparent_hugepage_flag flag);
extern ssize_t single_hugepage_flag_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf,
				enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern bool is_vma_temporary_stack(struct vm_area_struct *vma);

/*
 * True when THP may be used for @__vma: THP is enabled globally, or it is
 * enabled for madvise(MADV_HUGEPAGE) regions and the vma carries
 * VM_HUGEPAGE; VM_NOHUGEPAGE vmas and temporary stacks are excluded.
 */
#define transparent_hugepage_enabled(__vma)			\
	((transparent_hugepage_flags &				\
	  (1<<TRANSPARENT_HUGEPAGE_FLAG) ||			\
	  (transparent_hugepage_flags &				\
	   (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) &&		\
	   ((__vma)->vm_flags & VM_HUGEPAGE))) &&		\
	 !((__vma)->vm_flags & VM_NOHUGEPAGE) &&		\
	 !is_vma_temporary_stack(__vma))
#define transparent_hugepage_use_zero_page()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
#ifdef CONFIG_DEBUG_VM
#define transparent_hugepage_debug_cow()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG))
#else /* CONFIG_DEBUG_VM */
#define transparent_hugepage_debug_cow()	0
#endif /* CONFIG_DEBUG_VM */

extern unsigned long transparent_hugepage_flags;
extern unsigned long thp_get_unmapped_area(struct file *filp,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags);

extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
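/*
 * Usage sketch (illustrative only, not part of this header): splitting
 * expects a locked compound page and returns 0 on success, so a caller
 * typically looks like
 *
 *	lock_page(page);
 *	if (split_huge_page(page))
 *		... split failed, page is still huge ...
 *	unlock_page(page);
 */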
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (pmd_trans_huge(*____pmd)				\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	}  while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	}  while (0)
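/*
 * Usage sketch (illustrative only, not part of this header): a page-table
 * walker that can only handle pte-level entries typically splits a huge
 * pmd in place and then continues at the pte level, e.g.
 *
 *	split_huge_pmd(vma, pmd, addr);
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	...
 *
 * The macro re-checks the entry itself, so calling it on a regular pmd is
 * harmless.
 */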
extern int hugepage_madvise(struct vm_area_struct *vma,
			    unsigned long *vm_flags, int advice);
extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end,
				  long adjust_next);
extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma);
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
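/*
 * Caller pattern sketch (illustrative only, not part of this header; it
 * assumes mmap_sem is held as required above): a non-NULL return means the
 * entry is a stable huge (or devmap) entry and the returned lock is held,
 * so the caller must drop it when done:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... operate on the huge pmd ...
 *		spin_unlock(ptl);
 *	} else {
 *		... fall back to the pte level ...
 *	}
 */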
static inline int hpage_nr_pages(struct page *page)
{
	if (unlikely(PageTransHuge(page)))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags);

extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;

static inline bool is_huge_zero_page(struct page *page)
{
	return ACCESS_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_page(pmd_page(pmd));
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

#define hpage_nr_pages(x) 1

#define transparent_hugepage_enabled(__vma) 0

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd, int flags)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud, int flags)
{
	return NULL;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#endif /* _LINUX_HUGE_MM_H */