/* linux/hugetlb.h */
  1. #ifndef _LINUX_HUGETLB_H
  2. #define _LINUX_HUGETLB_H
  3. #include <linux/fs.h>
  4. #ifdef CONFIG_HUGETLB_PAGE
  5. #include <linux/mempolicy.h>
  6. #include <linux/shm.h>
  7. #include <asm/tlbflush.h>
  8. struct ctl_table;
  9. static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
  10. {
  11. return vma->vm_flags & VM_HUGETLB;
  12. }
  13. int hugetlb_sysctl_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
  14. int hugetlb_treat_movable_handler(struct ctl_table *, int, struct file *, void __user *, size_t *, loff_t *);
  15. int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
  16. int follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, struct page **, struct vm_area_struct **, unsigned long *, int *, int);
  17. void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
  18. void __unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long);
  19. int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
  20. int hugetlb_report_meminfo(char *);
  21. int hugetlb_report_node_meminfo(int, char *);
  22. unsigned long hugetlb_total_pages(void);
  23. int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
  24. unsigned long address, int write_access);
  25. int hugetlb_reserve_pages(struct inode *inode, long from, long to);
  26. void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
  27. extern unsigned long max_huge_pages;
  28. extern unsigned long hugepages_treat_as_movable;
  29. extern const unsigned long hugetlb_zero, hugetlb_infinity;
  30. extern int sysctl_hugetlb_shm_group;
  31. /* arch callbacks */
  32. pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
  33. pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
  34. int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
  35. struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
  36. int write);
  37. struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
  38. pmd_t *pmd, int write);
  39. int pmd_huge(pmd_t pmd);
  40. void hugetlb_change_protection(struct vm_area_struct *vma,
  41. unsigned long address, unsigned long end, pgprot_t newprot);
  42. #ifndef ARCH_HAS_HUGEPAGE_ONLY_RANGE
  43. #define is_hugepage_only_range(mm, addr, len) 0
  44. #endif
  45. #ifndef ARCH_HAS_HUGETLB_FREE_PGD_RANGE
  46. #define hugetlb_free_pgd_range free_pgd_range
  47. #else
  48. void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
  49. unsigned long end, unsigned long floor,
  50. unsigned long ceiling);
  51. #endif
  52. #ifndef ARCH_HAS_PREPARE_HUGEPAGE_RANGE
  53. /*
  54. * If the arch doesn't supply something else, assume that hugepage
  55. * size aligned regions are ok without further preparation.
  56. */
  57. static inline int prepare_hugepage_range(unsigned long addr, unsigned long len,
  58. pgoff_t pgoff)
  59. {
  60. if (pgoff & (~HPAGE_MASK >> PAGE_SHIFT))
  61. return -EINVAL;
  62. if (len & ~HPAGE_MASK)
  63. return -EINVAL;
  64. if (addr & ~HPAGE_MASK)
  65. return -EINVAL;
  66. return 0;
  67. }
  68. #else
  69. int prepare_hugepage_range(unsigned long addr, unsigned long len,
  70. pgoff_t pgoff);
  71. #endif
  72. #ifndef ARCH_HAS_SETCLEAR_HUGE_PTE
  73. #define set_huge_pte_at(mm, addr, ptep, pte) set_pte_at(mm, addr, ptep, pte)
  74. #define huge_ptep_get_and_clear(mm, addr, ptep) ptep_get_and_clear(mm, addr, ptep)
  75. #else
  76. void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
  77. pte_t *ptep, pte_t pte);
  78. pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
  79. pte_t *ptep);
  80. #endif
  81. #ifndef ARCH_HAS_HUGETLB_PREFAULT_HOOK
  82. #define hugetlb_prefault_arch_hook(mm) do { } while (0)
  83. #else
  84. void hugetlb_prefault_arch_hook(struct mm_struct *mm);
  85. #endif
  86. #else /* !CONFIG_HUGETLB_PAGE */
  87. static inline int is_vm_hugetlb_page(struct vm_area_struct *vma)
  88. {
  89. return 0;
  90. }
  91. static inline unsigned long hugetlb_total_pages(void)
  92. {
  93. return 0;
  94. }
  95. #define follow_hugetlb_page(m,v,p,vs,a,b,i) ({ BUG(); 0; })
  96. #define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
  97. #define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
  98. #define hugetlb_prefault(mapping, vma) ({ BUG(); 0; })
  99. #define unmap_hugepage_range(vma, start, end) BUG()
  100. #define hugetlb_report_meminfo(buf) 0
  101. #define hugetlb_report_node_meminfo(n, buf) 0
  102. #define follow_huge_pmd(mm, addr, pmd, write) NULL
  103. #define prepare_hugepage_range(addr,len,pgoff) (-EINVAL)
  104. #define pmd_huge(x) 0
  105. #define is_hugepage_only_range(mm, addr, len) 0
  106. #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
  107. #define hugetlb_fault(mm, vma, addr, write) ({ BUG(); 0; })
  108. #define hugetlb_change_protection(vma, address, end, newprot)
  109. #ifndef HPAGE_MASK
  110. #define HPAGE_MASK PAGE_MASK /* Keep the compiler happy */
  111. #define HPAGE_SIZE PAGE_SIZE
  112. #endif
  113. #endif /* !CONFIG_HUGETLB_PAGE */
  114. #ifdef CONFIG_HUGETLBFS
  115. struct hugetlbfs_config {
  116. uid_t uid;
  117. gid_t gid;
  118. umode_t mode;
  119. long nr_blocks;
  120. long nr_inodes;
  121. };
  122. struct hugetlbfs_sb_info {
  123. long max_blocks; /* blocks allowed */
  124. long free_blocks; /* blocks free */
  125. long max_inodes; /* inodes allowed */
  126. long free_inodes; /* inodes free */
  127. spinlock_t stat_lock;
  128. };
  129. struct hugetlbfs_inode_info {
  130. struct shared_policy policy;
  131. struct inode vfs_inode;
  132. };
  133. static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
  134. {
  135. return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
  136. }
  137. static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
  138. {
  139. return sb->s_fs_info;
  140. }
  141. extern const struct file_operations hugetlbfs_file_operations;
  142. extern struct vm_operations_struct hugetlb_vm_ops;
  143. struct file *hugetlb_file_setup(const char *name, size_t);
  144. int hugetlb_get_quota(struct address_space *mapping);
  145. void hugetlb_put_quota(struct address_space *mapping);
  146. static inline int is_file_hugepages(struct file *file)
  147. {
  148. if (file->f_op == &hugetlbfs_file_operations)
  149. return 1;
  150. if (is_file_shm_hugepages(file))
  151. return 1;
  152. return 0;
  153. }
  154. static inline void set_file_hugepages(struct file *file)
  155. {
  156. file->f_op = &hugetlbfs_file_operations;
  157. }
  158. #else /* !CONFIG_HUGETLBFS */
  159. #define is_file_hugepages(file) 0
  160. #define set_file_hugepages(file) BUG()
  161. #define hugetlb_file_setup(name,size) ERR_PTR(-ENOSYS)
  162. #endif /* !CONFIG_HUGETLBFS */
  163. #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
  164. unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
  165. unsigned long len, unsigned long pgoff,
  166. unsigned long flags);
  167. #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
  168. #endif /* _LINUX_HUGETLB_H */