inode.c
/*
 * hugetlbpage-backed filesystem. Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>        /* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
        kuid_t uid;
        kgid_t gid;
        umode_t mode;
        long max_hpages;
        long nr_inodes;
        struct hstate *hstate;
        long min_hpages;
};

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

int sysctl_hugetlb_shm_group;

enum {
        Opt_size, Opt_nr_inodes,
        Opt_mode, Opt_uid, Opt_gid,
        Opt_pagesize, Opt_min_size,
        Opt_err,
};

static const match_table_t tokens = {
        {Opt_size,      "size=%s"},
        {Opt_nr_inodes, "nr_inodes=%s"},
        {Opt_mode,      "mode=%o"},
        {Opt_uid,       "uid=%u"},
        {Opt_gid,       "gid=%u"},
        {Opt_pagesize,  "pagesize=%s"},
        {Opt_min_size,  "min_size=%s"},
        {Opt_err,       NULL},
};
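/*
 * For illustration (not part of the original source): the table above is
 * what a typical mount exercises, e.g.
 *
 *   mount -t hugetlbfs -o size=1G,min_size=512M,pagesize=2M,mode=1770 \
 *         none /mnt/huge
 *
 * size= and min_size= take bytes with an optional K/M/G suffix, or a
 * percentage of the huge page pool; pagesize= selects the hstate.  See
 * hugetlbfs_parse_options() below.
 */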
/*
 * Drop the reference held on each page in the pagevec and reset it for
 * reuse.
 */
static void huge_pagevec_release(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); ++i)
                put_page(pvec->pages[i]);

        pagevec_reinit(pvec);
}

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);

        /*
         * vma address alignment (but not the pgoff alignment) has
         * already been checked by prepare_hugepage_range.  If you add
         * any error returns here, do so after setting VM_HUGETLB, so
         * is_vm_hugetlb_page tests below unmap_region go the right
         * way when do_mmap_pgoff unwinds (may be important on powerpc
         * and ia64).
         */
        vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;

        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;

        vma_len = (loff_t)(vma->vm_end - vma->vm_start);

        mutex_lock(&inode->i_mutex);
        file_accessed(file);

        ret = -ENOMEM;
        len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

        if (hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
                                vma->vm_flags))
                goto out;

        ret = 0;
        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
                inode->i_size = len;
out:
        mutex_unlock(&inode->i_mutex);

        return ret;
}
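/*
 * Worked example (illustrative, not in the original source): with 2 MB
 * huge pages, a 4 MB mapping at vm_pgoff 0 gives len = 4 MB, so
 * hugetlb_reserve_pages() is asked to reserve huge pages [0, 2).  Note
 * that vm_pgoff is in base-page units, hence the
 * "vm_pgoff >> huge_page_order(h)" conversion to huge-page units.
 */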
/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}
#endif
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
                        struct iov_iter *to, unsigned long size)
{
        size_t copied = 0;
        int i, chunksize;

        /* Find which 4k chunk and offset within that chunk */
        i = offset >> PAGE_CACHE_SHIFT;
        offset = offset & ~PAGE_CACHE_MASK;

        while (size) {
                size_t n;
                chunksize = PAGE_CACHE_SIZE;
                if (offset)
                        chunksize -= offset;
                if (chunksize > size)
                        chunksize = size;
                n = copy_page_to_iter(&page[i], offset, chunksize, to);
                copied += n;
                if (n != chunksize)
                        return copied;
                offset = 0;
                size -= chunksize;
                i++;
        }
        return copied;
}
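/*
 * Worked example (illustrative, not in the original source): with 4 KB
 * base pages, a read starting at offset 5000 within a huge page gives
 * i = 5000 >> 12 = 1 and offset = 5000 & 4095 = 904, i.e. the copy
 * starts 904 bytes into the second constituent base page and then
 * continues one PAGE_CACHE_SIZE chunk at a time.
 */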
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(); we can't use that
 * since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct hstate *h = hstate_file(file);
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long index = iocb->ki_pos >> huge_page_shift(h);
        unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
        unsigned long end_index;
        loff_t isize;
        ssize_t retval = 0;

        while (iov_iter_count(to)) {
                struct page *page;
                size_t nr, copied;

                /* nr is the maximum number of bytes to copy from this page */
                nr = huge_page_size(h);
                isize = i_size_read(inode);
                if (!isize)
                        break;
                end_index = (isize - 1) >> huge_page_shift(h);
                if (index > end_index)
                        break;
                if (index == end_index) {
                        nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
                        if (nr <= offset)
                                break;
                }
                nr = nr - offset;

                /* Find the page */
                page = find_lock_page(mapping, index);
                if (unlikely(page == NULL)) {
                        /*
                         * We have a HOLE, zero out the user-buffer for the
                         * length of the hole or request.
                         */
                        copied = iov_iter_zero(nr, to);
                } else {
                        unlock_page(page);

                        /*
                         * We have the page, copy it to user space buffer.
                         */
                        copied = hugetlbfs_read_actor(page, offset, to, nr);
                        page_cache_release(page);
                }
                offset += copied;
                retval += copied;
                if (copied != nr && iov_iter_count(to)) {
                        if (!retval)
                                retval = -EFAULT;
                        break;
                }
                index += offset >> huge_page_shift(h);
                offset &= ~huge_page_mask(h);
        }
        iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
        return retval;
}
/*
 * hugetlbfs does not support buffered write(2); file contents can only be
 * populated through a mapping, so ->write_begin always fails and
 * ->write_end must never be reached.
 */
static int hugetlbfs_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        BUG();
        return -EINVAL;
}
static void truncate_huge_page(struct page *page)
{
        ClearPageDirty(page);
        ClearPageUptodate(page);
        delete_from_page_cache(page);
}

/*
 * Remove all pages from lstart onward from the page cache, then give the
 * freed reservations back to the pool.
 */
static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
        struct hstate *h = hstate_inode(inode);
        struct address_space *mapping = &inode->i_data;
        const pgoff_t start = lstart >> huge_page_shift(h);
        struct pagevec pvec;
        pgoff_t next;
        int i, freed = 0;

        pagevec_init(&pvec, 0);
        next = start;
        while (1) {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                        if (next == start)
                                break;
                        next = start;
                        continue;
                }

                for (i = 0; i < pagevec_count(&pvec); ++i) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);
                        if (page->index > next)
                                next = page->index;
                        ++next;
                        truncate_huge_page(page);
                        unlock_page(page);
                        freed++;
                }
                huge_pagevec_release(&pvec);
        }
        BUG_ON(!lstart && mapping->nrpages);
        hugetlb_unreserve_pages(inode, start, freed);
}
static void hugetlbfs_evict_inode(struct inode *inode)
{
        struct resv_map *resv_map;

        truncate_hugepages(inode, 0);
        resv_map = (struct resv_map *)inode->i_mapping->private_data;
        /* The root inode doesn't have a resv_map, so check for it */
        if (resv_map)
                resv_map_release(&resv_map->refs);
        clear_inode(inode);
}

static inline void
hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
        struct vm_area_struct *vma;

        vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
                unsigned long v_offset;

                /*
                 * Can the expression below overflow on 32-bit arches?
                 * No, because the interval tree returns us only those vmas
                 * which overlap the truncated area starting at pgoff,
                 * and no vma on a 32-bit arch can span beyond 4GB.
                 */
                if (vma->vm_pgoff < pgoff)
                        v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
                else
                        v_offset = 0;

                unmap_hugepage_range(vma, vma->vm_start + v_offset,
                                     vma->vm_end, NULL);
        }
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
        pgoff_t pgoff;
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);

        BUG_ON(offset & ~huge_page_mask(h));
        pgoff = offset >> PAGE_SHIFT;

        i_size_write(inode, offset);
        i_mmap_lock_write(mapping);
        if (!RB_EMPTY_ROOT(&mapping->i_mmap))
                hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
        i_mmap_unlock_write(mapping);
        truncate_hugepages(inode, offset);
        return 0;
}
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        struct hstate *h = hstate_inode(inode);
        int error;
        unsigned int ia_valid = attr->ia_valid;

        BUG_ON(!inode);

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if (ia_valid & ATTR_SIZE) {
                error = -EINVAL;
                if (attr->ia_size & ~huge_page_mask(h))
                        return -EINVAL;
                error = hugetlb_vmtruncate(inode, attr->ia_size);
                if (error)
                        return error;
        }

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
}
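/*
 * For illustration (not part of the original source): the alignment check
 * above means that, e.g., ftruncate(fd, 4096) on a file backed by 2 MB
 * pages fails with -EINVAL; a new size must be a multiple of the huge
 * page size.
 */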
static struct inode *hugetlbfs_get_root(struct super_block *sb,
                                        struct hugetlbfs_config *config)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode->i_mode = S_IFDIR | config->mode;
                inode->i_uid = config->uid;
                inode->i_gid = config->gid;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                info = HUGETLBFS_I(inode);
                mpol_shared_policy_init(&info->policy, NULL);
                inode->i_op = &hugetlbfs_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
                /* directory inodes start off with i_nlink == 2 (for "." entry) */
                inc_nlink(inode);
                lockdep_annotate_inode_mutex_key(inode);
        }
        return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        struct inode *dir,
                                        umode_t mode, dev_t dev)
{
        struct inode *inode;
        struct resv_map *resv_map;

        resv_map = resv_map_alloc();
        if (!resv_map)
                return NULL;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
                                &hugetlbfs_i_mmap_rwsem_key);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                inode->i_mapping->private_data = resv_map;
                info = HUGETLBFS_I(inode);
                /*
                 * The policy is initialized here even if we are creating a
                 * private inode because initialization simply creates an
                 * empty rb tree and calls spin_lock_init(); later, when we
                 * call mpol_free_shared_policy(), it will just return because
                 * the rb tree will still be empty.
                 */
                mpol_shared_policy_init(&info->policy, NULL);
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_op = &hugetlbfs_inode_operations;
                        inode->i_fop = &hugetlbfs_file_operations;
                        break;
                case S_IFDIR:
                        inode->i_op = &hugetlbfs_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;

                        /* directory inodes start off with i_nlink == 2 (for "." entry) */
                        inc_nlink(inode);
                        break;
                case S_IFLNK:
                        inode->i_op = &page_symlink_inode_operations;
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
        } else
                kref_put(&resv_map->refs, resv_map_release);

        return inode;
}
/*
 * File creation. Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
                        struct dentry *dentry, umode_t mode, dev_t dev)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
        if (inode) {
                dir->i_ctime = dir->i_mtime = CURRENT_TIME;
                d_instantiate(dentry, inode);
                dget(dentry);   /* Extra count - pin the dentry in core */
                error = 0;
        }
        return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
        if (!retval)
                inc_nlink(dir);
        return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
        return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
                        struct dentry *dentry, const char *symname)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
        if (inode) {
                int l = strlen(symname)+1;
                error = page_symlink(inode, symname, l);
                if (!error) {
                        d_instantiate(dentry, inode);
                        dget(dentry);
                } else
                        iput(inode);
        }
        dir->i_ctime = dir->i_mtime = CURRENT_TIME;

        return error;
}

/*
 * Mark the head page dirty; dirty state is tracked on the compound head,
 * not on the individual base pages.
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
        struct page *head = compound_head(page);

        SetPageDirty(head);
        return 0;
}
static int hugetlbfs_migrate_page(struct address_space *mapping,
                                struct page *newpage, struct page *page,
                                enum migrate_mode mode)
{
        int rc;

        rc = migrate_huge_page_move_mapping(mapping, newpage, page);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
        migrate_page_copy(newpage, page);

        return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
        struct hstate *h = hstate_inode(d_inode(dentry));

        buf->f_type = HUGETLBFS_MAGIC;
        buf->f_bsize = huge_page_size(h);
        if (sbinfo) {
                spin_lock(&sbinfo->stat_lock);
                /* If no limits set, just report 0 for max/free/used
                 * blocks, like simple_statfs() */
                if (sbinfo->spool) {
                        long free_pages;

                        spin_lock(&sbinfo->spool->lock);
                        buf->f_blocks = sbinfo->spool->max_hpages;
                        free_pages = sbinfo->spool->max_hpages
                                - sbinfo->spool->used_hpages;
                        buf->f_bavail = buf->f_bfree = free_pages;
                        spin_unlock(&sbinfo->spool->lock);
                        buf->f_files = sbinfo->max_inodes;
                        buf->f_ffree = sbinfo->free_inodes;
                }
                spin_unlock(&sbinfo->stat_lock);
        }
        buf->f_namelen = NAME_MAX;
        return 0;
}
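/*
 * For illustration (not part of the original source): because f_bsize is
 * the huge page size, statfs(2)/df report blocks in huge-page units.  A
 * mount with 2 MB pages and size=1G shows f_blocks = 512, and each file
 * consumes whole huge pages.
 */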
static void hugetlbfs_put_super(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

        if (sbi) {
                sb->s_fs_info = NULL;

                if (sbi->spool)
                        hugepage_put_subpool(sbi->spool);

                kfree(sbi);
        }
}

/*
 * A negative free_inodes means "no limit" (nr_inodes was not set), in
 * which case no accounting is done.
 */
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                if (unlikely(!sbinfo->free_inodes)) {
                        spin_unlock(&sbinfo->stat_lock);
                        return 0;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }

        return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}
static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
        struct hugetlbfs_inode_info *p;

        if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
                return NULL;
        p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
        if (unlikely(!p)) {
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
        }
        return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
        hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
        mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
        call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
        .write_begin    = hugetlbfs_write_begin,
        .write_end      = hugetlbfs_write_end,
        .set_page_dirty = hugetlbfs_set_page_dirty,
        .migratepage    = hugetlbfs_migrate_page,
};

static void init_once(void *foo)
{
        struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

        inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
        .read_iter              = hugetlbfs_read_iter,
        .mmap                   = hugetlbfs_file_mmap,
        .fsync                  = noop_fsync,
        .get_unmapped_area      = hugetlb_get_unmapped_area,
        .llseek                 = default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
        .create         = hugetlbfs_create,
        .lookup         = simple_lookup,
        .link           = simple_link,
        .unlink         = simple_unlink,
        .symlink        = hugetlbfs_symlink,
        .mkdir          = hugetlbfs_mkdir,
        .rmdir          = simple_rmdir,
        .mknod          = hugetlbfs_mknod,
        .rename         = simple_rename,
        .setattr        = hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
        .setattr        = hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
        .alloc_inode    = hugetlbfs_alloc_inode,
        .destroy_inode  = hugetlbfs_destroy_inode,
        .evict_inode    = hugetlbfs_evict_inode,
        .statfs         = hugetlbfs_statfs,
        .put_super      = hugetlbfs_put_super,
        .show_options   = generic_show_options,
};
enum { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
                                                                int val_type)
{
        if (val_type == NO_SIZE)
                return -1;

        if (val_type == SIZE_PERCENT) {
                size_opt <<= huge_page_shift(h);
                size_opt *= h->max_huge_pages;
                do_div(size_opt, 100);
        }

        size_opt >>= huge_page_shift(h);
        return size_opt;
}
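/*
 * Worked example (illustrative, not in the original source): with 2 MB
 * huge pages and a pool of max_huge_pages = 512, "size=50%" arrives here
 * as size_opt = 50 with val_type == SIZE_PERCENT:
 *
 *   size_opt = (50 << 21) * 512 / 100   -- 50% of the pool, in bytes
 *   size_opt >>= 21                     -- = 256 huge pages
 *
 * A plain "size=512M" arrives with SIZE_STD and only takes the final
 * shift: 512M >> 21 = 256 huge pages.
 */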
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
        char *p, *rest;
        substring_t args[MAX_OPT_ARGS];
        int option;
        unsigned long long max_size_opt = 0, min_size_opt = 0;
        int max_val_type = NO_SIZE, min_val_type = NO_SIZE;

        if (!options)
                return 0;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                if (!*p)
                        continue;

                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->uid = make_kuid(current_user_ns(), option);
                        if (!uid_valid(pconfig->uid))
                                goto bad_val;
                        break;

                case Opt_gid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->gid = make_kgid(current_user_ns(), option);
                        if (!gid_valid(pconfig->gid))
                                goto bad_val;
                        break;

                case Opt_mode:
                        if (match_octal(&args[0], &option))
                                goto bad_val;
                        pconfig->mode = option & 01777U;
                        break;

                case Opt_size: {
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        max_size_opt = memparse(args[0].from, &rest);
                        max_val_type = SIZE_STD;
                        if (*rest == '%')
                                max_val_type = SIZE_PERCENT;
                        break;
                }

                case Opt_nr_inodes:
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        pconfig->nr_inodes = memparse(args[0].from, &rest);
                        break;

                case Opt_pagesize: {
                        unsigned long ps;
                        ps = memparse(args[0].from, &rest);
                        pconfig->hstate = size_to_hstate(ps);
                        if (!pconfig->hstate) {
                                pr_err("Unsupported page size %lu MB\n",
                                        ps >> 20);
                                return -EINVAL;
                        }
                        break;
                }

                case Opt_min_size: {
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        min_size_opt = memparse(args[0].from, &rest);
                        min_val_type = SIZE_STD;
                        if (*rest == '%')
                                min_val_type = SIZE_PERCENT;
                        break;
                }

                default:
                        pr_err("Bad mount option: \"%s\"\n", p);
                        return -EINVAL;
                        break;
                }
        }

        /*
         * Use huge page pool size (in hstate) to convert the size
         * options to number of huge pages.  If NO_SIZE, -1 is returned.
         */
        pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
                                                max_size_opt, max_val_type);
        pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
                                                min_size_opt, min_val_type);

        /*
         * If max_size was specified, min_size must not be larger.
         */
        if (max_val_type > NO_SIZE &&
            pconfig->min_hpages > pconfig->max_hpages) {
                pr_err("minimum size cannot be greater than maximum size\n");
                return -EINVAL;
        }

        return 0;

bad_val:
        pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
        return -EINVAL;
}
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
        int ret;
        struct hugetlbfs_config config;
        struct hugetlbfs_sb_info *sbinfo;

        save_mount_options(sb, data);

        config.max_hpages = -1; /* No limit on size by default */
        config.nr_inodes = -1; /* No limit on number of inodes by default */
        config.uid = current_fsuid();
        config.gid = current_fsgid();
        config.mode = 0755;
        config.hstate = &default_hstate;
        config.min_hpages = -1; /* No default minimum size */
        ret = hugetlbfs_parse_options(data, &config);
        if (ret)
                return ret;

        sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;
        sb->s_fs_info = sbinfo;
        sbinfo->hstate = config.hstate;
        spin_lock_init(&sbinfo->stat_lock);
        sbinfo->max_inodes = config.nr_inodes;
        sbinfo->free_inodes = config.nr_inodes;
        sbinfo->spool = NULL;

        /*
         * Allocate and initialize subpool if maximum or minimum size is
         * specified.  Any needed reservations (for minimum size) are taken
         * when the subpool is created.
         */
        if (config.max_hpages != -1 || config.min_hpages != -1) {
                sbinfo->spool = hugepage_new_subpool(config.hstate,
                                                        config.max_hpages,
                                                        config.min_hpages);
                if (!sbinfo->spool)
                        goto out_free;
        }
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = huge_page_size(config.hstate);
        sb->s_blocksize_bits = huge_page_shift(config.hstate);
        sb->s_magic = HUGETLBFS_MAGIC;
        sb->s_op = &hugetlbfs_ops;
        sb->s_time_gran = 1;
        sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
        if (!sb->s_root)
                goto out_free;
        return 0;

out_free:
        kfree(sbinfo->spool);
        kfree(sbinfo);
        return -ENOMEM;
}
static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
        .name           = "hugetlbfs",
        .mount          = hugetlbfs_mount,
        .kill_sb        = kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

/*
 * SHM_HUGETLB is allowed for callers with CAP_IPC_LOCK or membership in
 * the group configured via the hugetlb_shm_group sysctl.
 */
static int can_do_hugetlb_shm(void)
{
        kgid_t shm_group;
        shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
        return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

/* Map a page size log2 (0 selects the default hstate) to an hstates[] index */
static int get_hstate_idx(int page_size_log)
{
        struct hstate *h = hstate_sizelog(page_size_log);

        if (!h)
                return -1;
        return h - hstates;
}

static const struct dentry_operations anon_ops = {
        .d_dname = simple_dname
};
/*
 * Note that the caller should align size to the proper hugepage size;
 * otherwise hugetlb_reserve_pages reserves one fewer huge page than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
                                vm_flags_t acctflag, struct user_struct **user,
                                int creat_flags, int page_size_log)
{
        struct file *file = ERR_PTR(-ENOMEM);
        struct inode *inode;
        struct path path;
        struct super_block *sb;
        struct qstr quick_string;
        int hstate_idx;

        hstate_idx = get_hstate_idx(page_size_log);
        if (hstate_idx < 0)
                return ERR_PTR(-ENODEV);

        *user = NULL;
        if (!hugetlbfs_vfsmount[hstate_idx])
                return ERR_PTR(-ENOENT);

        if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
                *user = current_user();
                if (user_shm_lock(size, *user)) {
                        task_lock(current);
                        pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
                                current->comm, current->pid);
                        task_unlock(current);
                } else {
                        *user = NULL;
                        return ERR_PTR(-EPERM);
                }
        }

        sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
        quick_string.name = name;
        quick_string.len = strlen(quick_string.name);
        quick_string.hash = 0;
        path.dentry = d_alloc_pseudo(sb, &quick_string);
        if (!path.dentry)
                goto out_shm_unlock;

        d_set_d_op(path.dentry, &anon_ops);
        path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
        file = ERR_PTR(-ENOSPC);
        inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto out_dentry;
        if (creat_flags == HUGETLB_SHMFS_INODE)
                inode->i_flags |= S_PRIVATE;

        file = ERR_PTR(-ENOMEM);
        if (hugetlb_reserve_pages(inode, 0,
                        size >> huge_page_shift(hstate_inode(inode)), NULL,
                        acctflag))
                goto out_inode;

        d_instantiate(path.dentry, inode);
        inode->i_size = size;
        clear_nlink(inode);

        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                        &hugetlbfs_file_operations);
        if (IS_ERR(file))
                goto out_dentry; /* inode is already attached */

        return file;

out_inode:
        iput(inode);
out_dentry:
        path_put(&path);
out_shm_unlock:
        if (*user) {
                user_shm_unlock(size, *user);
                *user = NULL;
        }
        return file;
}
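/*
 * For context (not part of the original source): hugetlb_file_setup() is
 * the kernel-internal backend used when userspace asks for huge-page
 * backed System V shared memory, e.g.
 *
 *   int id = shmget(IPC_PRIVATE, 4 * 1024 * 1024,
 *                   IPC_CREAT | SHM_HUGETLB | 0600);
 *
 * The unlinked file it returns lives on one of the internal
 * hugetlbfs_vfsmount[] mounts created in init_hugetlbfs_fs() below.
 */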
static int __init init_hugetlbfs_fs(void)
{
        struct hstate *h;
        int error;
        int i;

        if (!hugepages_supported()) {
                pr_info("disabling because there are no supported hugepage sizes\n");
                return -ENOTSUPP;
        }

        error = -ENOMEM;
        hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
                                        sizeof(struct hugetlbfs_inode_info),
                                        0, 0, init_once);
        if (hugetlbfs_inode_cachep == NULL)
                goto out2;

        error = register_filesystem(&hugetlbfs_fs_type);
        if (error)
                goto out;

        i = 0;
        for_each_hstate(h) {
                char buf[50];
                unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

                snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
                hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
                                                        buf);

                if (IS_ERR(hugetlbfs_vfsmount[i])) {
                        pr_err("Cannot mount internal hugetlbfs for page size %uK\n",
                               ps_kb);
                        error = PTR_ERR(hugetlbfs_vfsmount[i]);
                        hugetlbfs_vfsmount[i] = NULL;
                }
                i++;
        }
        /* Non-default hstates are optional */
        if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
                return 0;

out:
        kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
        return error;
}
static void __exit exit_hugetlbfs_fs(void)
{
        struct hstate *h;
        int i;

        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(hugetlbfs_inode_cachep);
        i = 0;
        for_each_hstate(h)
                kern_unmount(hugetlbfs_vfsmount[i++]);
        unregister_filesystem(&hugetlbfs_fs_type);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");
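/*
 * Usage sketch (illustrative, not part of the original source): a minimal
 * userspace consumer of a hugetlbfs mount.  Assumes hugetlbfs is mounted
 * at /mnt/huge and the pool has at least one free 2 MB page; error
 * handling omitted for brevity:
 *
 *      #include <fcntl.h>
 *      #include <sys/mman.h>
 *      #include <unistd.h>
 *
 *      int main(void)
 *      {
 *              size_t len = 2 * 1024 * 1024;   // one huge page
 *              int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
 *              char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                             MAP_SHARED, fd, 0);
 *              p[0] = 1;                       // faults in a huge page
 *              munmap(p, len);
 *              close(fd);
 *              return 0;
 *      }
 *
 * write(2) on the fd would fail (see hugetlbfs_write_begin() above); the
 * file must be populated through the mapping.
 */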