inode.c

/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	struct hstate		*hstate;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};
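
/*
 * Editorial note (not in the original source): a mount using these
 * options might look like
 *
 *	mount -t hugetlbfs -o size=1G,pagesize=2M,mode=1770 none /mnt/huge
 *
 * which limits the filesystem to 1G worth of 2M huge pages.
 */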

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * Offset passed to mmap (before page shift) could have been
	 * negative when represented as a (l)off_t.
	 */
	if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
		return -EINVAL;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;
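
	/*
	 * Illustrative note (editorial, not in the original source): with
	 * 2MB huge pages and a 4KB base page, ~huge_page_mask(h) >>
	 * PAGE_SHIFT is 511, so the check above requires vm_pgoff to be a
	 * multiple of 512 base pages, i.e. a 2MB-aligned file offset.
	 */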
	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;

		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
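		/*
		 * Worked example (editorial, not in the original source):
		 * with 2MB huge pages and isize = 5MB, end_index is 2 and
		 * the final page contributes nr = 1MB before the offset
		 * adjustment below.
		 */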
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
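		/*
		 * Editorial example (not in the original source): if the
		 * truncation starts at base-page pgoff 1024 and a vma begins
		 * at vm_pgoff 512, then v_offset below is
		 * (1024 - 512) << PAGE_SHIFT, i.e. the truncation point
		 * expressed as a byte offset into that vma.
		 */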
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX, this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec, 0);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, index, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);
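
	/*
	 * Worked example (editorial, not in the original source): with 2MB
	 * huge pages, offset = 3MB and len = 6MB give hole_start = 4MB and
	 * hole_end = 8MB, so only huge pages lying fully inside [3MB, 9MB)
	 * are punched out.
	 */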
	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;

		inode_lock(inode);
		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;
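
	/*
	 * Editorial example (not in the original source): with 2MB huge
	 * pages, offset = 3MB and len = 3MB yield start = 1 and end = 3,
	 * so huge page indices 1 and 2 (file range [2MB, 6MB)) are
	 * preallocated -- the mirror image of the hole punch rounding.
	 */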
	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * unlock_page() because the page was locked by
		 * huge_add_to_page_cache(); put_page() drops the
		 * reference taken by alloc_huge_page().
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;

	remove_huge_page(page);
	hugetlb_fix_reserve_counts(inode);
	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
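/*
 * Editorial example (not in the original source): for a mount created with
 * "size=1G,pagesize=2M", the /proc/mounts line would carry options such as
 * ",pagesize=2M,size=1073741824" (sizes are reported in bytes).
 */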
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.set_page_dirty		= hugetlbfs_set_page_dirty,
	.migratepage		= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
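/*
 * Worked example (editorial, not in the original source): with 2MB huge
 * pages, "size=1G" gives size_opt >> 21 = 512 pages, while "size=50%" on a
 * 512-page pool gives ((50 << 21) * 512 / 100) >> 21 = 256 pages.
 */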
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;

		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;

			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	config.max_hpages = -1;	/* No limit on size by default */
	config.nr_inodes = -1;	/* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1;	/* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = config.uid;
	sbinfo->gid = config.gid;
	sbinfo->mode = config.mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (config.max_hpages != -1 || config.min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(config.hstate,
							config.max_hpages,
							config.min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;

out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;

	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one less hugepage than intended.
 */
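/*
 * Editorial example (not in the original source): with 2MB huge pages, an
 * unaligned size of 3MB gives size >> huge_page_shift == 1, so only one
 * huge page is reserved even though the range touches two.
 */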
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for page size %uK",
			       ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
	return error;
}
fs_initcall(init_hugetlbfs_fs)