inode.c

/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	kuid_t uid;
	kgid_t gid;
	umode_t mode;
	long max_hpages;
	long nr_inodes;
	struct hstate *hstate;
	long min_hpages;
};

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};
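
/*
 * Illustrative use of the mount options above (hypothetical values and
 * mount point):
 *
 *	mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M,\
 *		uid=1000,gid=1000,mode=1770,nr_inodes=64 none /mnt/huge
 *
 * size= and min_size= accept either a byte value (with optional K/M/G
 * suffix, parsed by memparse()) or a percentage of the huge page pool,
 * e.g. size=50%.
 */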

#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);

	mutex_lock(&inode->i_mutex);
	file_accessed(file);

	ret = -ENOMEM;
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		inode->i_size = len;
out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}
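
/*
 * Worked example for the vm_pgoff check above (illustrative, assuming
 * 2 MiB huge pages and 4 KiB base pages): ~huge_page_mask(h) >> PAGE_SHIFT
 * is 0x1ff.  An mmap() at file offset 3 MiB has vm_pgoff == 0x300, and
 * 0x300 & 0x1ff != 0, so the mapping fails with -EINVAL; a 2 MiB offset
 * (vm_pgoff == 0x200) is accepted.
 */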

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_CACHE_SHIFT;
	offset = offset & ~PAGE_CACHE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_CACHE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			page_cache_release(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}
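
/*
 * Position arithmetic above, illustrated (assuming 2 MiB huge pages):
 * ki_pos == 5 MiB + 16 yields index == 2 and offset == 1 MiB + 16, i.e.
 * the copy starts within the third huge page.  On return, ki_pos is
 * rebuilt as (index << huge_page_shift(h)) + offset.
 */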

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults can not race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next;
	int i, freed = 0;
	long lookup_nr = PAGEVEC_SIZE;
	bool truncate_op = (lend == LLONG_MAX);

	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec, 0);
	next = start;
	while (next < end) {
		/*
		 * Don't grab more pages than the number left in the range.
		 */
		if (end - next < lookup_nr)
			lookup_nr = end - next;

		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			/*
			 * The page (index) could be beyond end.  This is
			 * only possible in the punch hole case as end is
			 * max page offset in the truncate case.
			 */
			next = page->index;
			if (next >= end)
				break;

			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, next, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			lock_page(page);
			if (likely(!page_mapped(page))) {
				bool rsv_on_error = !PagePrivate(page);
				/*
				 * We must free the huge page and remove
				 * from page cache (remove_huge_page) BEFORE
				 * removing the region/reserve map
				 * (hugetlb_unreserve_pages).  In rare out
				 * of memory conditions, removal of the
				 * region/reserve map could fail.  Before
				 * freeing the page, note PagePrivate which
				 * is used in case of error.
				 */
				remove_huge_page(page);
				freed++;
				if (!truncate_op) {
					if (unlikely(hugetlb_unreserve_pages(
							inode, next,
							next + 1, 1)))
						hugetlb_fix_reserve_counts(
							inode, rsv_on_error);
				}
			} else {
				/*
				 * If page is mapped, it was faulted in after
				 * being unmapped.  It indicates a race between
				 * hole punch and page fault.  Do nothing in
				 * this case.  Getting here in a truncate
				 * operation is a bug.
				 */
				BUG_ON(truncate_op);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		++next;
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}
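
/*
 * Call patterns for remove_inode_hugepages(), as used by the callers in
 * this file (see hugetlbfs_evict_inode(), hugetlb_vmtruncate() and
 * hugetlbfs_punch_hole() below):
 *
 *	truncate:   remove_inode_hugepages(inode, offset, LLONG_MAX);
 *	hole punch: remove_inode_hugepages(inode, hole_start, hole_end);
 *
 * Only an end value of LLONG_MAX selects truncate semantics; a punch
 * whose range happens to extend past EOF is still treated as a hole
 * punch.
 */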

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static inline void
hugetlb_vmdelete_list(struct rb_root *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		/*
		 * Compute the end address per vma rather than clobbering
		 * the loop bound "end", which must stay in page offsets.
		 */
		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
					NULL);
	}
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;

		mutex_lock(&inode->i_mutex);
		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		mutex_unlock(&inode->i_mutex);
	}

	return 0;
}
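
/*
 * Worked example for the rounding above (illustrative, 2 MiB pages):
 * offset == 1 MiB, len == 4 MiB gives hole_start == 2 MiB and
 * hole_end == 4 MiB, so only the one fully covered huge page is
 * removed.  A sub-huge-page request such as offset == 0, len == 1 MiB
 * leaves hole_end <= hole_start and nothing is removed.
 */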

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	mutex_lock(&inode->i_mutex);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * put_page() drops the reference from alloc_huge_page();
		 * unlock_page() because the page was locked by
		 * huge_add_to_page_cache().  The page cache still holds
		 * its own reference, so the page remains valid.
		 */
		put_page(page);
		unlock_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = CURRENT_TIME;
out:
	mutex_unlock(&inode->i_mutex);
	return error;
}
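
/*
 * Illustrative userspace counterpart (a sketch only; assumes a
 * hypothetical hugetlbfs mount at /dev/hugepages with 2 MiB pages):
 *
 *	int fd = open("/dev/hugepages/buf", O_CREAT | O_RDWR, 0600);
 *
 *	// preallocate two huge pages
 *	if (fallocate(fd, 0, 0, 4 << 20) < 0)
 *		perror("fallocate");
 *
 *	// punch the first huge page back out, keeping i_size
 *	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		      0, 2 << 20) < 0)
 *		perror("fallocate");
 */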

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = HUGETLBFS_I(inode);
		mpol_shared_policy_init(&info->policy, NULL);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems.  This needs an
 * annotation because huge_pmd_share() does an allocation under
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->private_data = resv_map;
		info = HUGETLBFS_I(inode);
		/*
		 * The policy is initialized here even if we are creating a
		 * private inode because initialization simply creates an
		 * empty rb tree and calls spin_lock_init(); later, when we
		 * call mpol_free_shared_policy(), it will just return because
		 * the rb tree will still be empty.
		 */
		mpol_shared_policy_init(&info->policy, NULL);
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage	= hugetlbfs_migrate_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= generic_show_options,
};

enum { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
								int val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
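
/*
 * Worked example (illustrative): with 2 MiB huge pages and
 * h->max_huge_pages == 512, "size=50%" arrives as size_opt == 50 and
 * val_type == SIZE_PERCENT: 50 << 21 is 100 MiB, times 512 and divided
 * by 100 gives 512 MiB, which converts back to 256 huge pages.  A plain
 * "size=512M" is SIZE_STD and 512 MiB >> 21 == 256 pages directly.
 */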

static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	int max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	save_mount_options(sb, data);

	config.max_hpages = -1;	/* No limit on size by default */
	config.nr_inodes = -1;	/* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1;	/* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (config.max_hpages != -1 || config.min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(config.hstate,
							config.max_hpages,
							config.min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;

out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}
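
/*
 * Illustration: page_size_log is the base-2 log of the requested huge
 * page size, as carried by flags such as MAP_HUGE_2MB/SHM_HUGE_2MB (21)
 * or MAP_HUGE_1GB (30).  Zero means "use the default hstate", so
 * get_hstate_idx(0) returns default_hstate_idx, while get_hstate_idx(21)
 * returns the index of the 2 MiB hstate if one is configured, or -1.
 */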

static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

/*
 * Note that the size should be aligned to the proper hugepage size by the
 * caller; otherwise hugetlb_reserve_pages reserves one huge page less than
 * intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}
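
/*
 * Sketch of a caller (illustrative only; the real users are the SysV
 * shared memory and mmap(MAP_HUGETLB) paths):
 *
 *	struct user_struct *user;
 *	struct file *file;
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, 4UL << 20, 0, &user,
 *				  HUGETLB_ANONHUGE_INODE, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 * A page_size_log of 0 selects the default huge page size.
 */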

static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, 0, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for page size %uK",
				ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
	return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
	struct hstate *h;
	int i;

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(hugetlbfs_inode_cachep);
	i = 0;
	for_each_hstate(h)
		kern_unmount(hugetlbfs_vfsmount[i++]);
	unregister_filesystem(&hugetlbfs_fs_type);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)
MODULE_LICENSE("GPL");