inode.c

/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
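
/* Parsed mount options; used to fill in the superblock at mount time. */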
struct hugetlbfs_config {
	struct hstate	*hstate;
	long		max_hpages;
	long		nr_inodes;
	long		min_hpages;
	kuid_t		uid;
	kgid_t		gid;
	umode_t		mode;
};

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif
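
/* Drop the reference held on each page in the pagevec and reset it for reuse. */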
static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * Offset passed to mmap (before page shift) could have been
	 * negative when represented as a (l)off_t.
	 */
	if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
		return -EINVAL;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}
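
/*
 * hugetlbfs does not support buffered writes via write(2); files are
 * populated through mmap() or fallocate().  write_begin therefore always
 * fails, and write_end should be unreachable.
 */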
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}
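
/*
 * Unmap the range [start, end) (in units of base pages, PAGE_SIZE) from
 * every vma in the interval tree that overlaps it.
 */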
static void
hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX, this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next;
	int i, freed = 0;
	long lookup_nr = PAGEVEC_SIZE;
	bool truncate_op = (lend == LLONG_MAX);

	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec, 0);
	next = start;
	while (next < end) {
		/*
		 * Don't grab more pages than the number left in the range.
		 */
		if (end - next < lookup_nr)
			lookup_nr = end - next;

		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			/*
			 * The page (index) could be beyond end.  This is
			 * only possible in the punch hole case as end is
			 * max page offset in the truncate case.
			 */
			next = page->index;
			if (next >= end)
				break;

			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, next, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					next * pages_per_huge_page(h),
					(next + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							next, next + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		++next;
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}
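
/*
 * Truncate the inode to @offset: update i_size, unmap the truncated
 * range from all sharing vmas, then release the pages and reservations
 * beyond the new end of file.
 */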
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;

		inode_lock(inode);
		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}
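
/*
 * fallocate(2) support: preallocate huge pages into the page cache for
 * the given range, or hand off to hugetlbfs_punch_hole() when
 * FALLOC_FL_PUNCH_HOLE is set.
 */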
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * page_put due to reference from alloc_huge_page()
		 * unlock_page because locked by add_to_page_cache()
		 */
		put_page(page);
		unlock_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		error = -EINVAL;
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;

	remove_huge_page(page);
	hugetlb_fix_reserve_counts(inode);
	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}
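
/*
 * Inode accounting against the nr_inodes mount option.  A negative
 * free_inodes count means "no limit" and the counters are left alone.
 */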
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.set_page_dirty		= hugetlbfs_set_page_dirty,
	.migratepage		= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	config.max_hpages = -1;		/* No limit on size by default */
	config.nr_inodes = -1;		/* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1;		/* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = config.uid;
	sbinfo->gid = config.gid;
	sbinfo->mode = config.mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (config.max_hpages != -1 || config.min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(config.hstate,
							config.max_hpages,
							config.min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;

out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
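
/*
 * SHM_HUGETLB requires either CAP_IPC_LOCK or membership in the group
 * configured via /proc/sys/vm/hugetlb_shm_group.
 */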
static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}
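
/* Map a base-2 page size log to an index into the hstates[] array. */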
static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for page size %uK",
			       ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
	return error;
}
fs_initcall(init_hugetlbfs_fs)