inode.c

/*
 * hugetlbpage-backed filesystem. Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>		/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	kuid_t   uid;
	kgid_t   gid;
	umode_t  mode;
	long	 nr_blocks;
	long	 nr_inodes;
	struct hstate *hstate;
};

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

static struct backing_dev_info hugetlbfs_backing_dev_info = {
	.name		= "hugetlbfs",
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_err,	NULL},
};

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

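/*
 * mmap() of a hugetlbfs file: install the hugetlb VMA operations, check
 * that the file offset is huge-page aligned, and reserve huge pages for
 * the whole mapping up front.
 */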
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range. If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);

	mutex_lock(&inode->i_mutex);
	file_accessed(file);

	ret = -ENOMEM;
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	hugetlb_prefault_arch_hook(vma->vm_mm);

	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		inode->i_size = len;
out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static int
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			char __user *buf, unsigned long count,
			unsigned long size)
{
	char *kaddr;
	unsigned long left, copied = 0;
	int i, chunksize;

	if (size > count)
		size = count;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_CACHE_SHIFT;
	offset = offset & ~PAGE_CACHE_MASK;

	while (size) {
		chunksize = PAGE_CACHE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		kaddr = kmap(&page[i]);
		left = __copy_to_user(buf, kaddr + offset, chunksize);
		kunmap(&page[i]);
		if (left) {
			copied += (chunksize - left);
			break;
		}
		offset = 0;
		size -= chunksize;
		buf += chunksize;
		copied += chunksize;
		i++;
	}
	return copied ? copied : -EFAULT;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
			size_t len, loff_t *ppos)
{
	struct hstate *h = hstate_file(filp);
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = *ppos >> huge_page_shift(h);
	unsigned long offset = *ppos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	/* validate length */
	if (len == 0)
		goto out;

	for (;;) {
		struct page *page;
		unsigned long nr, ret;
		int ra;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			goto out;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				goto out;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			ret = len < nr ? len : nr;
			if (clear_user(buf, ret))
				ra = -EFAULT;
			else
				ra = 0;
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
			ret = ra;

			page_cache_release(page);
		}
		if (ra < 0) {
			if (retval == 0)
				retval = ra;
			goto out;
		}

		offset += ret;
		retval += ret;
		len -= ret;
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);

		/* short read or no more work */
		if ((ret != nr) || (len == 0))
			break;
	}
out:
	*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void truncate_huge_page(struct page *page)
{
	cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

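/*
 * Remove every huge page in the mapping from 'lstart' to the end of the
 * file, gathering pages in pagevec-sized batches until a lookup comes back
 * empty, then give the now-unused reservations back.
 */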
static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next;
	int i, freed = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (1) {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->index > next)
				next = page->index;
			++next;
			truncate_huge_page(page);
			unlock_page(page);
			freed++;
		}
		huge_pagevec_release(&pvec);
	}
	BUG_ON(!lstart && mapping->nrpages);
	hugetlb_unreserve_pages(inode, start, freed);
}

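/*
 * Final inode teardown: drop every huge page backing the file and release
 * its reservation map.
 */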
static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	truncate_hugepages(inode, 0);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static inline void
hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
	struct vm_area_struct *vma;

	vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
		unsigned long v_offset;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < pgoff)
			v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		unmap_hugepage_range(vma, vma->vm_start + v_offset,
				     vma->vm_end, NULL);
	}
}

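/*
 * Shrink the file to 'offset' (which must be huge-page aligned): update
 * i_size, unmap every VMA that covers the truncated range, then drop the
 * page cache pages and reservations beyond the new end of file.
 */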
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	mutex_lock(&mapping->i_mmap_mutex);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
	mutex_unlock(&mapping->i_mmap_mutex);
	truncate_hugepages(inode, offset);
	return 0;
}

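/*
 * setattr(): only huge-page-aligned size changes are accepted and go
 * through hugetlb_vmtruncate(); the remaining attributes are applied with
 * setattr_copy().
 */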
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		error = -EINVAL;
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = HUGETLBFS_I(inode);
		mpol_shared_policy_init(&info->policy, NULL);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under
 * i_mmap_mutex.
 */
static struct lock_class_key hugetlbfs_i_mmap_mutex_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
				&hugetlbfs_i_mmap_mutex_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_mapping->backing_dev_info = &hugetlbfs_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->private_data = resv_map;
		info = HUGETLBFS_I(inode);
		/*
		 * The policy is initialized here even if we are creating a
		 * private inode because initialization simply creates an
		 * empty rb tree and calls spin_lock_init(); later, when we
		 * call mpol_free_shared_policy(), it will just return because
		 * the rb tree will still be empty.
		 */
		mpol_shared_policy_init(&info->policy, NULL);
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

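/*
 * statfs(): report the huge page size as the block size; when the mount set
 * a size limit (a subpool exists), also report the subpool and inode usage.
 */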
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(dentry->d_inode);

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

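/*
 * Inode accounting for the nr_inodes= mount limit. A negative free_inodes
 * count means "no limit"; otherwise an inode may only be allocated while
 * the count is still positive.
 */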
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage	= hugetlbfs_migrate_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read			= hugetlbfs_read,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= generic_show_options,
};

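/*
 * Parse the mount options (size=, nr_inodes=, mode=, uid=, gid=, pagesize=)
 * into *pconfig. The size= value is converted to a number of huge pages only
 * after the option loop, since the conversion depends on the hstate selected
 * by any pagesize= option.
 */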
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long size = 0;
	enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			size = memparse(args[0].from, &rest);
			setsize = SIZE_STD;
			if (*rest == '%')
				setsize = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
			break;
		}
	}

	/* Do size after hstate is set up */
	if (setsize > NO_SIZE) {
		struct hstate *h = pconfig->hstate;
		if (setsize == SIZE_PERCENT) {
			size <<= huge_page_shift(h);
			size *= h->max_huge_pages;
			do_div(size, 100);
		}
		pconfig->nr_blocks = (size >> huge_page_shift(h));
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}

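/*
 * Fill in a hugetlbfs superblock: parse the mount options, allocate the
 * per-superblock info (plus a subpool if a size limit was given), and
 * create the root directory inode.
 */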
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	save_mount_options(sb, data);

	config.nr_blocks = -1;		/* No limit on size by default */
	config.nr_inodes = -1;		/* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	if (config.nr_blocks != -1) {
		sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

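/*
 * SHM_HUGETLB is allowed for tasks with CAP_IPC_LOCK or for members of the
 * group configured via the hugetlb_shm_group sysctl.
 */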
static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one fewer huge page than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

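/*
 * Module init: register the filesystem and create one internal kernel mount
 * per supported huge page size, so hugetlb_file_setup() always has a mount
 * to back its anonymous files. Only the default hstate's mount is required
 * for initialization to succeed.
 */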
static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = bdi_init(&hugetlbfs_backing_dev_info);
	if (error)
		return error;

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, 0, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for "
				"page size %uK", ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}

		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
	bdi_destroy(&hugetlbfs_backing_dev_info);
	return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
	struct hstate *h;
	int i;

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(hugetlbfs_inode_cachep);
	i = 0;
	for_each_hstate(h)
		kern_unmount(hugetlbfs_vfsmount[i++]);
	unregister_filesystem(&hugetlbfs_fs_type);
	bdi_destroy(&hugetlbfs_backing_dev_info);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");