/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGETLB_H
#define _LINUX_HUGETLB_H

#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/fs.h>
#include <linux/hugetlb_inline.h>
#include <linux/cgroup.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <asm/pgtable.h>

struct ctl_table;
struct user_struct;
struct mmu_gather;

#ifndef is_hugepd
/*
 * Some architectures require a hugepage directory format in order to
 * support multiple hugepage sizes. For example,
 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
 * introduced the same on powerpc. This allows for a more flexible hugepage
 * pagetable layout.
 */
typedef struct { unsigned long pd; } hugepd_t;
#define is_hugepd(hugepd)	(0)
#define __hugepd(x)		((hugepd_t) { (x) })
static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
			      unsigned pdshift, unsigned long end,
			      int write, struct page **pages, int *nr)
{
	return 0;
}
#else
extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
		       unsigned pdshift, unsigned long end,
		       int write, struct page **pages, int *nr);
#endif
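/*
 * Note: when an architecture does not provide is_hugepd(), the macro above
 * is constant false, so generic code that tests is_hugepd() before calling
 * gup_huge_pd() never reaches the stub; it exists only to keep those call
 * sites compilable.
 */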
#ifdef CONFIG_HUGETLB_PAGE

#include <linux/mempolicy.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>

struct hugepage_subpool {
	spinlock_t lock;
	long count;
	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
	long used_hpages;	/* Used count against maximum, includes
				 * both allocated and reserved pages. */
	struct hstate *hstate;
	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
	long rsv_hpages;	/* Pages reserved against global pool to
				 * satisfy minimum size. */
};
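/*
 * Roughly, a resv_map tracks which ranges of a mapping have huge page
 * reservations: "regions" holds the reserved ranges, while "region_cache"
 * holds preallocated entries (counted by region_cache_count against
 * adds_in_progress) so that a pending region add does not have to allocate
 * memory while the lock is held.
 */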
struct resv_map {
	struct kref refs;
	spinlock_t lock;
	struct list_head regions;
	long adds_in_progress;
	struct list_head region_cache;
	long region_cache_count;
};

extern struct resv_map *resv_map_alloc(void);
void resv_map_release(struct kref *ref);

extern spinlock_t hugetlb_lock;
extern int hugetlb_max_hstate __read_mostly;

#define for_each_hstate(h) \
	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
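/*
 * Illustrative use only: walk every registered hugepage size.
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("hstate %s: order %u\n", h->name, huge_page_order(h));
 */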
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages);
void hugepage_put_subpool(struct hugepage_subpool *spool);

void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);

#ifdef CONFIG_NUMA
int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
#endif

int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
			 unsigned long *, unsigned long *, long, unsigned int,
			 int *);
void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			  struct vm_area_struct *vma,
			  unsigned long start, unsigned long end,
			  struct page *ref_page);
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct page *ref_page);
void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
			     struct vm_area_struct *dst_vma,
			     unsigned long dst_addr,
			     unsigned long src_addr,
			     struct page **pagep);
int hugetlb_reserve_pages(struct inode *inode, long from, long to,
			  struct vm_area_struct *vma,
			  vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
			     long freed);
bool isolate_huge_page(struct page *page, struct list_head *list);
void putback_active_hugepage(struct page *page);
void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);

extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
				pgoff_t idx, unsigned long address);
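/*
 * Sketch of the caller pattern (callers serialize faults on the same
 * mapping/index by hashing into the mutex table):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(h, mapping, idx, address);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... handle the fault ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */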
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);

extern int sysctl_hugetlb_shm_group;
extern struct list_head huge_boot_pages;

/* arch callbacks */
pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz);
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz);
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end);
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
			      int write);
struct page *follow_huge_pd(struct vm_area_struct *vma,
			    unsigned long address, hugepd_t hpd,
			    int flags, int pdshift);
struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
				pmd_t *pmd, int flags);
struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
				pud_t *pud, int flags);
struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
			     pgd_t *pgd, int flags);
int pmd_huge(pmd_t pmd);
int pud_huge(pud_t pud);
unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot);
bool is_hugetlb_entry_migration(pte_t pte);
#else /* !CONFIG_HUGETLB_PAGE */

static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
{
}

static inline unsigned long hugetlb_total_pages(void)
{
	return 0;
}

static inline int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr,
					pte_t *ptep)
{
	return 0;
}

static inline void adjust_range_if_pmd_sharing_possible(
				struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
}

#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
#define hugetlb_report_node_meminfo(n, buf)	0
static inline void hugetlb_show_meminfo(void)
{
}
#define follow_huge_pd(vma, addr, hpd, flags, pdshift)	NULL
#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
#define follow_huge_pud(mm, addr, pud, flags)	NULL
#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
#define pmd_huge(x)	0
#define pud_huge(x)	0
#define is_hugepage_only_range(mm, addr, len)	0
#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling)	({ BUG(); 0; })
#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
				src_addr, pagep)	({ BUG(); 0; })
#define huge_pte_offset(mm, address, sz)	0

static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
	return false;
}
#define putback_active_hugepage(p)	do {} while (0)
#define move_hugetlb_state(old, new, reason)	do {} while (0)

static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	return 0;
}

static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
			struct vm_area_struct *vma, unsigned long start,
			unsigned long end, struct page *ref_page)
{
	BUG();
}

#endif /* !CONFIG_HUGETLB_PAGE */
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
#ifndef pgd_huge
#define pgd_huge(x)	0
#endif
#ifndef p4d_huge
#define p4d_huge(x)	0
#endif

#ifndef pgd_write
static inline int pgd_write(pgd_t pgd)
{
	BUG();
	return 0;
}
#endif
#define HUGETLB_ANON_FILE	"anon_hugepage"

enum {
	/*
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply
	 */
	HUGETLB_SHMFS_INODE     = 1,
	/*
	 * The file is being created on the internal vfs mount and shmfs
	 * accounting rules do not apply
	 */
	HUGETLB_ANONHUGE_INODE  = 2,
};
#ifdef CONFIG_HUGETLBFS
struct hugetlbfs_sb_info {
	long	max_inodes;   /* inodes allowed */
	long	free_inodes;  /* inodes free */
	spinlock_t	stat_lock;
	struct hstate *hstate;
	struct hugepage_subpool *spool;
	kuid_t	uid;
	kgid_t	gid;
	umode_t mode;
};

static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
	unsigned int seals;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

extern const struct file_operations hugetlbfs_file_operations;
extern const struct vm_operations_struct hugetlb_vm_ops;
struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
				struct user_struct **user, int creat_flags,
				int page_size_log);

static inline bool is_file_hugepages(struct file *file)
{
	if (file->f_op == &hugetlbfs_file_operations)
		return true;

	return is_file_shm_hugepages(file);
}
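/*
 * Rough sketch of how hugetlb_file_setup() is used: mmap() with
 * MAP_ANONYMOUS | MAP_HUGETLB and SysV shm with SHM_HUGETLB both create a
 * file on the kernel-internal hugetlbfs mount (passing
 * HUGETLB_ANONHUGE_INODE or HUGETLB_SHMFS_INODE, respectively) and then map
 * that file.
 */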
#else /* !CONFIG_HUGETLBFS */

#define is_file_hugepages(file)			false
static inline struct file *
hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
		struct user_struct **user, int creat_flags,
		int page_size_log)
{
	return ERR_PTR(-ENOSYS);
}

#endif /* !CONFIG_HUGETLBFS */

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
					unsigned long len, unsigned long pgoff,
					unsigned long flags);
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
#ifdef CONFIG_HUGETLB_PAGE

#define HSTATE_NAME_LEN 32
/* Defines one hugetlb page size */
struct hstate {
	int next_nid_to_alloc;
	int next_nid_to_free;
	unsigned int order;
	unsigned long mask;
	unsigned long max_huge_pages;
	unsigned long nr_huge_pages;
	unsigned long free_huge_pages;
	unsigned long resv_huge_pages;
	unsigned long surplus_huge_pages;
	unsigned long nr_overcommit_huge_pages;
	struct list_head hugepage_activelist;
	struct list_head hugepage_freelists[MAX_NUMNODES];
	unsigned int nr_huge_pages_node[MAX_NUMNODES];
	unsigned int free_huge_pages_node[MAX_NUMNODES];
	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
	/* cgroup control files */
	struct cftype cgroup_files[5];
#endif
	char name[HSTATE_NAME_LEN];
};
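/*
 * One hstate exists per supported hugepage size (for example, 2 MB and 1 GB
 * on x86-64). "order" is log2 of the number of base pages per huge page;
 * the remaining fields are the global and per-node pool counters (total,
 * free, reserved and surplus pages).
 */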
struct huge_bootmem_page {
	struct list_head list;
	struct hstate *hstate;
};

struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr, int avoid_reserve);
struct page *alloc_huge_page_node(struct hstate *h, int nid);
struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
				nodemask_t *nmask);
struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
				unsigned long address);
int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			pgoff_t idx);

/* arch callback */
int __init __alloc_bootmem_huge_page(struct hstate *h);
int __init alloc_bootmem_huge_page(struct hstate *h);

void __init hugetlb_bad_size(void);
void __init hugetlb_add_hstate(unsigned order);
struct hstate *size_to_hstate(unsigned long size);

#ifndef HUGE_MAX_HSTATE
#define HUGE_MAX_HSTATE 1
#endif

extern struct hstate hstates[HUGE_MAX_HSTATE];
extern unsigned int default_hstate_idx;

#define default_hstate (hstates[default_hstate_idx])

static inline struct hstate *hstate_inode(struct inode *i)
{
	return HUGETLBFS_SB(i->i_sb)->hstate;
}

static inline struct hstate *hstate_file(struct file *f)
{
	return hstate_inode(file_inode(f));
}

static inline struct hstate *hstate_sizelog(int page_size_log)
{
	if (!page_size_log)
		return &default_hstate;

	return size_to_hstate(1UL << page_size_log);
}

static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
{
	return hstate_file(vma->vm_file);
}

static inline unsigned long huge_page_size(struct hstate *h)
{
	return (unsigned long)PAGE_SIZE << h->order;
}

extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);

extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

static inline unsigned long huge_page_mask(struct hstate *h)
{
	return h->mask;
}

static inline unsigned int huge_page_order(struct hstate *h)
{
	return h->order;
}

static inline unsigned huge_page_shift(struct hstate *h)
{
	return h->order + PAGE_SHIFT;
}

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return huge_page_order(h) >= MAX_ORDER;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1 << h->order;
}

static inline unsigned int blocks_per_huge_page(struct hstate *h)
{
	return huge_page_size(h) / 512;
}
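/*
 * The divisor of 512 above is the traditional block unit used for
 * inode->i_blocks (st_blocks in stat(2) is reported in 512-byte units),
 * so this returns the number of 512-byte blocks covered by one huge page.
 */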
#include <asm/hugetlb.h>

#ifndef arch_make_huge_pte
static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
				       struct page *page, int writable)
{
	return entry;
}
#endif

static inline struct hstate *page_hstate(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return size_to_hstate(PAGE_SIZE << compound_order(page));
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return hstates[index].order + PAGE_SHIFT;
}

static inline int hstate_index(struct hstate *h)
{
	return h - hstates;
}

pgoff_t __basepage_index(struct page *page);

/* Return page->index in PAGE_SIZE units */
static inline pgoff_t basepage_index(struct page *page)
{
	if (!PageCompound(page))
		return page->index;

	return __basepage_index(page);
}

extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
				    unsigned long end_pfn);

static inline bool hugepage_migration_supported(struct hstate *h)
{
#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
	if ((huge_page_shift(h) == PMD_SHIFT) ||
		(huge_page_shift(h) == PGDIR_SHIFT))
		return true;
	else
		return false;
#else
	return false;
#endif
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	if (huge_page_size(h) == PMD_SIZE)
		return pmd_lockptr(mm, (pmd_t *) pte);
	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
	return &mm->page_table_lock;
}
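/*
 * PMD-sized huge pages can use the split page-table lock of their PMD page;
 * every larger size falls back to the single mm->page_table_lock.
 */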
#ifndef hugepages_supported
/*
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
 */
#define hugepages_supported() (HPAGE_SHIFT != 0)
#endif

void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);

static inline void hugetlb_count_add(long l, struct mm_struct *mm)
{
	atomic_long_add(l, &mm->hugetlb_usage);
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
	atomic_long_sub(l, &mm->hugetlb_usage);
}
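/*
 * mm->hugetlb_usage counts the huge pages mapped by this mm;
 * hugetlb_report_usage() exposes it (in kB) as the HugetlbPages line in
 * /proc/<pid>/status.
 */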
#ifndef set_huge_swap_pte_at
static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
	set_huge_pte_at(mm, addr, ptep, pte);
}
#endif
#else	/* CONFIG_HUGETLB_PAGE */
struct hstate {};
#define alloc_huge_page(v, a, r) NULL
#define alloc_huge_page_node(h, nid) NULL
#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
#define alloc_huge_page_vma(h, vma, address) NULL
#define alloc_bootmem_huge_page(h) NULL
#define hstate_file(f) NULL
#define hstate_sizelog(s) NULL
#define hstate_vma(v) NULL
#define hstate_inode(i) NULL
#define page_hstate(page) NULL
#define huge_page_size(h) PAGE_SIZE
#define huge_page_mask(h) PAGE_MASK
#define vma_kernel_pagesize(v) PAGE_SIZE
#define vma_mmu_pagesize(v) PAGE_SIZE
#define huge_page_order(h) 0
#define huge_page_shift(h) PAGE_SHIFT

static inline bool hstate_is_gigantic(struct hstate *h)
{
	return false;
}

static inline unsigned int pages_per_huge_page(struct hstate *h)
{
	return 1;
}

static inline unsigned hstate_index_to_shift(unsigned index)
{
	return 0;
}

static inline int hstate_index(struct hstate *h)
{
	return 0;
}

static inline pgoff_t basepage_index(struct page *page)
{
	return page->index;
}

static inline int dissolve_free_huge_page(struct page *page)
{
	return 0;
}

static inline int dissolve_free_huge_pages(unsigned long start_pfn,
					   unsigned long end_pfn)
{
	return 0;
}

static inline bool hugepage_migration_supported(struct hstate *h)
{
	return false;
}

static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
					   struct mm_struct *mm, pte_t *pte)
{
	return &mm->page_table_lock;
}

static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
{
}

static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
{
}

static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
					pte_t *ptep, pte_t pte, unsigned long sz)
{
}
#endif	/* CONFIG_HUGETLB_PAGE */
static inline spinlock_t *huge_pte_lock(struct hstate *h,
					struct mm_struct *mm, pte_t *pte)
{
	spinlock_t *ptl;

	ptl = huge_pte_lockptr(h, mm, pte);
	spin_lock(ptl);
	return ptl;
}
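/*
 * Illustrative caller pattern:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *
 *	... operate on *ptep while the lock is held ...
 *	spin_unlock(ptl);
 */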

#endif /* _LINUX_HUGETLB_H */