inode.c

/*
 * hugetlbpage-backed filesystem. Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h> /* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
        kuid_t uid;
        kgid_t gid;
        umode_t mode;
        long max_hpages;
        long nr_inodes;
        struct hstate *hstate;
        long min_hpages;
};

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

int sysctl_hugetlb_shm_group;

enum {
        Opt_size, Opt_nr_inodes,
        Opt_mode, Opt_uid, Opt_gid,
        Opt_pagesize, Opt_min_size,
        Opt_err,
};

static const match_table_t tokens = {
        {Opt_size, "size=%s"},
        {Opt_nr_inodes, "nr_inodes=%s"},
        {Opt_mode, "mode=%o"},
        {Opt_uid, "uid=%u"},
        {Opt_gid, "gid=%u"},
        {Opt_pagesize, "pagesize=%s"},
        {Opt_min_size, "min_size=%s"},
        {Opt_err, NULL},
};

static void huge_pagevec_release(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); ++i)
                put_page(pvec->pages[i]);

        pagevec_reinit(pvec);
}
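
/*
 * Set up a huge page mapping: mark the vma as hugetlb-backed, validate the
 * pgoff alignment, reserve huge pages for the range and, for writable
 * mappings, extend i_size to cover the mapped area.
 */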
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);

        /*
         * vma address alignment (but not the pgoff alignment) has
         * already been checked by prepare_hugepage_range. If you add
         * any error returns here, do so after setting VM_HUGETLB, so
         * is_vm_hugetlb_page tests below unmap_region go the right
         * way when do_mmap_pgoff unwinds (may be important on powerpc
         * and ia64).
         */
        vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;

        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;

        vma_len = (loff_t)(vma->vm_end - vma->vm_start);

        mutex_lock(&inode->i_mutex);
        file_accessed(file);

        ret = -ENOMEM;
        len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

        if (hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
                                vma->vm_flags))
                goto out;

        ret = 0;
        hugetlb_prefault_arch_hook(vma->vm_mm);

        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
                inode->i_size = len;
out:
        mutex_unlock(&inode->i_mutex);

        return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}
#endif
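
/*
 * Copy data from a huge page to an iov_iter, walking the compound page in
 * PAGE_CACHE_SIZE chunks. Returns the number of bytes copied, which may be
 * short if copy_page_to_iter() cannot copy a full chunk.
 */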
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
                        struct iov_iter *to, unsigned long size)
{
        size_t copied = 0;
        int i, chunksize;

        /* Find which 4k chunk and offset within that chunk */
        i = offset >> PAGE_CACHE_SHIFT;
        offset = offset & ~PAGE_CACHE_MASK;

        while (size) {
                size_t n;
                chunksize = PAGE_CACHE_SIZE;
                if (offset)
                        chunksize -= offset;
                if (chunksize > size)
                        chunksize = size;
                n = copy_page_to_iter(&page[i], offset, chunksize, to);
                copied += n;
                if (n != chunksize)
                        return copied;
                offset = 0;
                size -= chunksize;
                i++;
        }
        return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(); we can't use that
 * since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct hstate *h = hstate_file(file);
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long index = iocb->ki_pos >> huge_page_shift(h);
        unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
        unsigned long end_index;
        loff_t isize;
        ssize_t retval = 0;

        while (iov_iter_count(to)) {
                struct page *page;
                size_t nr, copied;

                /* nr is the maximum number of bytes to copy from this page */
                nr = huge_page_size(h);
                isize = i_size_read(inode);
                if (!isize)
                        break;
                end_index = (isize - 1) >> huge_page_shift(h);
                if (index > end_index)
                        break;
                if (index == end_index) {
                        nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
                        if (nr <= offset)
                                break;
                }
                nr = nr - offset;

                /* Find the page */
                page = find_lock_page(mapping, index);
                if (unlikely(page == NULL)) {
                        /*
                         * We have a HOLE, zero out the user-buffer for the
                         * length of the hole or request.
                         */
                        copied = iov_iter_zero(nr, to);
                } else {
                        unlock_page(page);

                        /*
                         * We have the page, copy it to user space buffer.
                         */
                        copied = hugetlbfs_read_actor(page, offset, to, nr);
                        page_cache_release(page);
                }
                offset += copied;
                retval += copied;
                if (copied != nr && iov_iter_count(to)) {
                        if (!retval)
                                retval = -EFAULT;
                        break;
                }
                index += offset >> huge_page_shift(h);
                offset &= ~huge_page_mask(h);
        }
        iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
        return retval;
}

static int hugetlbfs_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        BUG();
        return -EINVAL;
}

static void truncate_huge_page(struct page *page)
{
        ClearPageDirty(page);
        ClearPageUptodate(page);
        delete_from_page_cache(page);
}
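
/*
 * Remove every page from lstart onward from this inode's page cache and
 * give back the corresponding reservations via hugetlb_unreserve_pages().
 */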
static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
        struct hstate *h = hstate_inode(inode);
        struct address_space *mapping = &inode->i_data;
        const pgoff_t start = lstart >> huge_page_shift(h);
        struct pagevec pvec;
        pgoff_t next;
        int i, freed = 0;

        pagevec_init(&pvec, 0);
        next = start;
        while (1) {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                        if (next == start)
                                break;
                        next = start;
                        continue;
                }

                for (i = 0; i < pagevec_count(&pvec); ++i) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);
                        if (page->index > next)
                                next = page->index;
                        ++next;
                        truncate_huge_page(page);
                        unlock_page(page);
                        freed++;
                }
                huge_pagevec_release(&pvec);
        }
        BUG_ON(!lstart && mapping->nrpages);
        hugetlb_unreserve_pages(inode, start, freed);
}
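
/* Final inode teardown: drop all cached pages and release the reserve map. */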
static void hugetlbfs_evict_inode(struct inode *inode)
{
        struct resv_map *resv_map;

        truncate_hugepages(inode, 0);
        resv_map = (struct resv_map *)inode->i_mapping->private_data;
        /* root inode doesn't have the resv_map, so we should check it */
        if (resv_map)
                resv_map_release(&resv_map->refs);
        clear_inode(inode);
}
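
/*
 * Unmap, from every vma in the mapping's interval tree, the part of the
 * mapping that lies at or beyond the new end-of-file at pgoff.
 */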
static inline void
hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
        struct vm_area_struct *vma;

        vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
                unsigned long v_offset;

                /*
                 * Can the expression below overflow on 32-bit arches?
                 * No, because the interval tree returns us only those vmas
                 * which overlap the truncated area starting at pgoff,
                 * and no vma on a 32-bit arch can span beyond the 4GB.
                 */
                if (vma->vm_pgoff < pgoff)
                        v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
                else
                        v_offset = 0;

                unmap_hugepage_range(vma, vma->vm_start + v_offset,
                                     vma->vm_end, NULL);
        }
}
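
/*
 * Shrink the file to 'offset' (which must be huge page aligned): update
 * i_size, unmap the truncated range from all user mappings, then drop the
 * pages and their reservations.
 */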
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
        pgoff_t pgoff;
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);

        BUG_ON(offset & ~huge_page_mask(h));
        pgoff = offset >> PAGE_SHIFT;

        i_size_write(inode, offset);
        i_mmap_lock_write(mapping);
        if (!RB_EMPTY_ROOT(&mapping->i_mmap))
                hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
        i_mmap_unlock_write(mapping);
        truncate_hugepages(inode, offset);
        return 0;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        struct hstate *h = hstate_inode(inode);
        int error;
        unsigned int ia_valid = attr->ia_valid;

        BUG_ON(!inode);

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if (ia_valid & ATTR_SIZE) {
                error = -EINVAL;
                if (attr->ia_size & ~huge_page_mask(h))
                        return -EINVAL;
                error = hugetlb_vmtruncate(inode, attr->ia_size);
                if (error)
                        return error;
        }

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
}
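
/* Allocate and initialize the root directory inode for a new superblock. */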
static struct inode *hugetlbfs_get_root(struct super_block *sb,
                                        struct hugetlbfs_config *config)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode->i_mode = S_IFDIR | config->mode;
                inode->i_uid = config->uid;
                inode->i_gid = config->gid;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                info = HUGETLBFS_I(inode);
                mpol_shared_policy_init(&info->policy, NULL);
                inode->i_op = &hugetlbfs_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
                /* directory inodes start off with i_nlink == 2 (for "." entry) */
                inc_nlink(inode);
                lockdep_annotate_inode_mutex_key(inode);
        }
        return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        struct inode *dir,
                                        umode_t mode, dev_t dev)
{
        struct inode *inode;
        struct resv_map *resv_map;

        resv_map = resv_map_alloc();
        if (!resv_map)
                return NULL;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
                                &hugetlbfs_i_mmap_rwsem_key);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                inode->i_mapping->private_data = resv_map;
                info = HUGETLBFS_I(inode);
                /*
                 * The policy is initialized here even if we are creating a
                 * private inode because initialization simply creates an
                 * empty rb tree and calls spin_lock_init(); later, when we
                 * call mpol_free_shared_policy() it will just return because
                 * the rb tree will still be empty.
                 */
                mpol_shared_policy_init(&info->policy, NULL);
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_op = &hugetlbfs_inode_operations;
                        inode->i_fop = &hugetlbfs_file_operations;
                        break;
                case S_IFDIR:
                        inode->i_op = &hugetlbfs_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;
                        /* directory inodes start off with i_nlink == 2 (for "." entry) */
                        inc_nlink(inode);
                        break;
                case S_IFLNK:
                        inode->i_op = &page_symlink_inode_operations;
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
        } else
                kref_put(&resv_map->refs, resv_map_release);

        return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
                        struct dentry *dentry, umode_t mode, dev_t dev)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
        if (inode) {
                dir->i_ctime = dir->i_mtime = CURRENT_TIME;
                d_instantiate(dentry, inode);
                dget(dentry); /* Extra count - pin the dentry in core */
                error = 0;
        }
        return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
        if (!retval)
                inc_nlink(dir);
        return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
        return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
                        struct dentry *dentry, const char *symname)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
        if (inode) {
                int l = strlen(symname)+1;
                error = page_symlink(inode, symname, l);
                if (!error) {
                        d_instantiate(dentry, inode);
                        dget(dentry);
                } else
                        iput(inode);
        }
        dir->i_ctime = dir->i_mtime = CURRENT_TIME;

        return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
        struct page *head = compound_head(page);

        SetPageDirty(head);
        return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
                                struct page *newpage, struct page *page,
                                enum migrate_mode mode)
{
        int rc;

        rc = migrate_huge_page_move_mapping(mapping, newpage, page);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
        migrate_page_copy(newpage, page);

        return MIGRATEPAGE_SUCCESS;
}
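
/*
 * Report filesystem statistics in units of huge pages; block limits come
 * from the subpool when one was configured at mount time.
 */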
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
        struct hstate *h = hstate_inode(d_inode(dentry));

        buf->f_type = HUGETLBFS_MAGIC;
        buf->f_bsize = huge_page_size(h);
        if (sbinfo) {
                spin_lock(&sbinfo->stat_lock);
                /* If no limits set, just report 0 for max/free/used
                 * blocks, like simple_statfs() */
                if (sbinfo->spool) {
                        long free_pages;

                        spin_lock(&sbinfo->spool->lock);
                        buf->f_blocks = sbinfo->spool->max_hpages;
                        free_pages = sbinfo->spool->max_hpages
                                - sbinfo->spool->used_hpages;
                        buf->f_bavail = buf->f_bfree = free_pages;
                        spin_unlock(&sbinfo->spool->lock);
                        buf->f_files = sbinfo->max_inodes;
                        buf->f_ffree = sbinfo->free_inodes;
                }
                spin_unlock(&sbinfo->stat_lock);
        }
        buf->f_namelen = NAME_MAX;
        return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

        if (sbi) {
                sb->s_fs_info = NULL;

                if (sbi->spool)
                        hugepage_put_subpool(sbi->spool);

                kfree(sbi);
        }
}
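
/*
 * Inode accounting helpers: the free_inodes count is only enforced when the
 * filesystem was mounted with an nr_inodes limit (free_inodes >= 0).
 */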
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                if (unlikely(!sbinfo->free_inodes)) {
                        spin_unlock(&sbinfo->stat_lock);
                        return 0;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }

        return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

static struct kmem_cache *hugetlbfs_inode_cachep;
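
/* Allocate a hugetlbfs inode from the slab cache, honouring the inode limit. */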
static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
        struct hugetlbfs_inode_info *p;

        if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
                return NULL;
        p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
        if (unlikely(!p)) {
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
        }
        return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
        hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
        mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
        call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
        .write_begin = hugetlbfs_write_begin,
        .write_end = hugetlbfs_write_end,
        .set_page_dirty = hugetlbfs_set_page_dirty,
        .migratepage = hugetlbfs_migrate_page,
};

static void init_once(void *foo)
{
        struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

        inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
        .read_iter = hugetlbfs_read_iter,
        .mmap = hugetlbfs_file_mmap,
        .fsync = noop_fsync,
        .get_unmapped_area = hugetlb_get_unmapped_area,
        .llseek = default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
        .create = hugetlbfs_create,
        .lookup = simple_lookup,
        .link = simple_link,
        .unlink = simple_unlink,
        .symlink = hugetlbfs_symlink,
        .mkdir = hugetlbfs_mkdir,
        .rmdir = simple_rmdir,
        .mknod = hugetlbfs_mknod,
        .rename = simple_rename,
        .setattr = hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
        .setattr = hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
        .alloc_inode = hugetlbfs_alloc_inode,
        .destroy_inode = hugetlbfs_destroy_inode,
        .evict_inode = hugetlbfs_evict_inode,
        .statfs = hugetlbfs_statfs,
        .put_super = hugetlbfs_put_super,
        .show_options = generic_show_options,
};

enum { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate. Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
                        int val_type)
{
        if (val_type == NO_SIZE)
                return -1;

        if (val_type == SIZE_PERCENT) {
                size_opt <<= huge_page_shift(h);
                size_opt *= h->max_huge_pages;
                do_div(size_opt, 100);
        }

        size_opt >>= huge_page_shift(h);
        return size_opt;
}
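
/*
 * Parse the comma-separated mount options (size, nr_inodes, mode, uid, gid,
 * pagesize, min_size) into *pconfig. Returns 0 on success and -EINVAL on
 * any unknown option or bad value.
 */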
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
        char *p, *rest;
        substring_t args[MAX_OPT_ARGS];
        int option;
        unsigned long long max_size_opt = 0, min_size_opt = 0;
        int max_val_type = NO_SIZE, min_val_type = NO_SIZE;

        if (!options)
                return 0;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                if (!*p)
                        continue;

                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->uid = make_kuid(current_user_ns(), option);
                        if (!uid_valid(pconfig->uid))
                                goto bad_val;
                        break;

                case Opt_gid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->gid = make_kgid(current_user_ns(), option);
                        if (!gid_valid(pconfig->gid))
                                goto bad_val;
                        break;

                case Opt_mode:
                        if (match_octal(&args[0], &option))
                                goto bad_val;
                        pconfig->mode = option & 01777U;
                        break;

                case Opt_size: {
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        max_size_opt = memparse(args[0].from, &rest);
                        max_val_type = SIZE_STD;
                        if (*rest == '%')
                                max_val_type = SIZE_PERCENT;
                        break;
                }

                case Opt_nr_inodes:
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        pconfig->nr_inodes = memparse(args[0].from, &rest);
                        break;

                case Opt_pagesize: {
                        unsigned long ps;
                        ps = memparse(args[0].from, &rest);
                        pconfig->hstate = size_to_hstate(ps);
                        if (!pconfig->hstate) {
                                pr_err("Unsupported page size %lu MB\n",
                                        ps >> 20);
                                return -EINVAL;
                        }
                        break;
                }

                case Opt_min_size: {
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        min_size_opt = memparse(args[0].from, &rest);
                        min_val_type = SIZE_STD;
                        if (*rest == '%')
                                min_val_type = SIZE_PERCENT;
                        break;
                }

                default:
                        pr_err("Bad mount option: \"%s\"\n", p);
                        return -EINVAL;
                        break;
                }
        }

        /*
         * Use huge page pool size (in hstate) to convert the size
         * options to number of huge pages. If NO_SIZE, -1 is returned.
         */
        pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
                                                max_size_opt, max_val_type);
        pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
                                                min_size_opt, min_val_type);

        /*
         * If max_size was specified, then min_size must be smaller
         */
        if (max_val_type > NO_SIZE &&
            pconfig->min_hpages > pconfig->max_hpages) {
                pr_err("minimum size can not be greater than maximum size\n");
                return -EINVAL;
        }

        return 0;

bad_val:
        pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
        return -EINVAL;
}
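
/*
 * Fill in a new superblock: apply the mount options, set up the per-sb info
 * and optional subpool, and create the root inode.
 */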
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
        int ret;
        struct hugetlbfs_config config;
        struct hugetlbfs_sb_info *sbinfo;

        save_mount_options(sb, data);

        config.max_hpages = -1; /* No limit on size by default */
        config.nr_inodes = -1; /* No limit on number of inodes by default */
        config.uid = current_fsuid();
        config.gid = current_fsgid();
        config.mode = 0755;
        config.hstate = &default_hstate;
        config.min_hpages = -1; /* No default minimum size */
        ret = hugetlbfs_parse_options(data, &config);
        if (ret)
                return ret;

        sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;
        sb->s_fs_info = sbinfo;
        sbinfo->hstate = config.hstate;
        spin_lock_init(&sbinfo->stat_lock);
        sbinfo->max_inodes = config.nr_inodes;
        sbinfo->free_inodes = config.nr_inodes;
        sbinfo->spool = NULL;

        /*
         * Allocate and initialize subpool if maximum or minimum size is
         * specified. Any needed reservations (for minimum size) are taken
         * when the subpool is created.
         */
        if (config.max_hpages != -1 || config.min_hpages != -1) {
                sbinfo->spool = hugepage_new_subpool(config.hstate,
                                                config.max_hpages,
                                                config.min_hpages);
                if (!sbinfo->spool)
                        goto out_free;
        }
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = huge_page_size(config.hstate);
        sb->s_blocksize_bits = huge_page_shift(config.hstate);
        sb->s_magic = HUGETLBFS_MAGIC;
        sb->s_op = &hugetlbfs_ops;
        sb->s_time_gran = 1;
        sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
        if (!sb->s_root)
                goto out_free;
        return 0;

out_free:
        kfree(sbinfo->spool);
        kfree(sbinfo);
        return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
        .name = "hugetlbfs",
        .mount = hugetlbfs_mount,
        .kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
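
/*
 * SHM_HUGETLB is allowed for tasks with CAP_IPC_LOCK or for members of the
 * group set via the hugetlb_shm_group sysctl.
 */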
static int can_do_hugetlb_shm(void)
{
        kgid_t shm_group;
        shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
        return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
        struct hstate *h = hstate_sizelog(page_size_log);

        if (!h)
                return -1;
        return h - hstates;
}

static const struct dentry_operations anon_ops = {
        .d_dname = simple_dname
};

/*
 * Note that size should be aligned to the proper hugepage size by the caller;
 * otherwise hugetlb_reserve_pages() reserves one less huge page than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
                                vm_flags_t acctflag, struct user_struct **user,
                                int creat_flags, int page_size_log)
{
        struct file *file = ERR_PTR(-ENOMEM);
        struct inode *inode;
        struct path path;
        struct super_block *sb;
        struct qstr quick_string;
        int hstate_idx;

        hstate_idx = get_hstate_idx(page_size_log);
        if (hstate_idx < 0)
                return ERR_PTR(-ENODEV);

        *user = NULL;
        if (!hugetlbfs_vfsmount[hstate_idx])
                return ERR_PTR(-ENOENT);

        if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
                *user = current_user();
                if (user_shm_lock(size, *user)) {
                        task_lock(current);
                        pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
                                current->comm, current->pid);
                        task_unlock(current);
                } else {
                        *user = NULL;
                        return ERR_PTR(-EPERM);
                }
        }

        sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
        quick_string.name = name;
        quick_string.len = strlen(quick_string.name);
        quick_string.hash = 0;
        path.dentry = d_alloc_pseudo(sb, &quick_string);
        if (!path.dentry)
                goto out_shm_unlock;

        d_set_d_op(path.dentry, &anon_ops);
        path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
        file = ERR_PTR(-ENOSPC);
        inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto out_dentry;

        file = ERR_PTR(-ENOMEM);
        if (hugetlb_reserve_pages(inode, 0,
                        size >> huge_page_shift(hstate_inode(inode)), NULL,
                        acctflag))
                goto out_inode;

        d_instantiate(path.dentry, inode);
        inode->i_size = size;
        clear_nlink(inode);

        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                        &hugetlbfs_file_operations);
        if (IS_ERR(file))
                goto out_dentry; /* inode is already attached */

        return file;

out_inode:
        iput(inode);
out_dentry:
        path_put(&path);
out_shm_unlock:
        if (*user) {
                user_shm_unlock(size, *user);
                *user = NULL;
        }
        return file;
}
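
/*
 * Module init: create the inode slab cache, register the filesystem and set
 * up one internal mount per supported huge page size. Only the mount for
 * the default hstate must succeed; the others are optional.
 */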
static int __init init_hugetlbfs_fs(void)
{
        struct hstate *h;
        int error;
        int i;

        if (!hugepages_supported()) {
                pr_info("disabling because there are no supported hugepage sizes\n");
                return -ENOTSUPP;
        }

        error = -ENOMEM;
        hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
                                        sizeof(struct hugetlbfs_inode_info),
                                        0, 0, init_once);
        if (hugetlbfs_inode_cachep == NULL)
                goto out2;

        error = register_filesystem(&hugetlbfs_fs_type);
        if (error)
                goto out;

        i = 0;
        for_each_hstate(h) {
                char buf[50];
                unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

                snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
                hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
                                                        buf);

                if (IS_ERR(hugetlbfs_vfsmount[i])) {
                        pr_err("Cannot mount internal hugetlbfs for "
                                "page size %uK", ps_kb);
                        error = PTR_ERR(hugetlbfs_vfsmount[i]);
                        hugetlbfs_vfsmount[i] = NULL;
                }
                i++;
        }
        /* Non default hstates are optional */
        if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
                return 0;

out:
        kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
        return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
        struct hstate *h;
        int i;

        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(hugetlbfs_inode_cachep);
        i = 0;
        for_each_hstate(h)
                kern_unmount(hugetlbfs_vfsmount[i++]);
        unregister_filesystem(&hugetlbfs_fs_type);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)
MODULE_LICENSE("GPL");