inode.c

/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */
#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/aio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
        u64 ino;
        __u64 cno;
        struct nilfs_root *root;
        int for_gc;
};

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
        if (root)
                atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
        if (root)
                atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode: inode struct of the target file
 * @blkoff: file block number
 * @bh_result: buffer head to be mapped on
 * @create: indicate whether allocating the block or not when it has not
 *          been allocated yet.
 *
 * This function does not issue the actual read request for the specified
 * data block; that is done by the VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 blknum = 0;
        int err = 0, ret;
        unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        if (ret >= 0) { /* found */
                map_bh(bh_result, inode->i_sb, blknum);
                if (ret > 0)
                        bh_result->b_size = (ret << inode->i_blkbits);
                goto out;
        }
        /* data block was not found */
        if (ret == -ENOENT && create) {
                struct nilfs_transaction_info ti;

                bh_result->b_blocknr = 0;
                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
                if (unlikely(err))
                        goto out;
                err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
                                        (unsigned long)bh_result);
                if (unlikely(err != 0)) {
                        if (err == -EEXIST) {
                                /*
                                 * The get_block() function could be called
                                 * from multiple callers for an inode.
                                 * However, the page having this block must
                                 * be locked in this case.
                                 */
                                printk(KERN_WARNING
                                       "nilfs_get_block: a race condition "
                                       "while inserting a data block. "
                                       "(inode number=%lu, file block "
                                       "offset=%llu)\n",
                                       inode->i_ino,
                                       (unsigned long long)blkoff);
                                err = 0;
                        }
                        nilfs_transaction_abort(inode->i_sb);
                        goto out;
                }
                nilfs_mark_inode_dirty(inode);
                nilfs_transaction_commit(inode->i_sb); /* never fails */
                /* Error handling should be detailed */
                set_buffer_new(bh_result);
                set_buffer_delay(bh_result);
                map_bh(bh_result, inode->i_sb, 0);
                /* Disk block number must be changed to proper value */
        } else if (ret == -ENOENT) {
                /*
                 * Not found is not an error (e.g. a hole); must return
                 * without the mapped state flag.
                 */
                ;
        } else {
                err = ret;
        }

 out:
        return err;
}

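/*
 * A rough usage sketch (not part of this file): get_block callbacks like
 * the one above are normally driven by generic helpers rather than called
 * directly, e.g.
 *
 *	bh->b_size = 1 << inode->i_blkbits;
 *	err = nilfs_get_block(inode, blkoff, bh, 0);
 *	if (!err && buffer_mapped(bh))
 *		submit_bh(READ, bh);
 *
 * where bh is a buffer head attached to a locked page cache page; the
 * read request itself is issued by the caller, as noted above.
 */
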
/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @page: the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file: file struct of the file to be read
 * @mapping: address_space struct used for reading multiple pages
 * @pages: the pages to be read
 * @nr_pages: number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        int err = 0;

        if (inode->i_sb->s_flags & MS_RDONLY) {
                nilfs_clear_dirty_pages(mapping, false);
                return -EROFS;
        }

        if (wbc->sync_mode == WB_SYNC_ALL)
                err = nilfs_construct_dsync_segment(inode->i_sb, inode,
                                                    wbc->range_start,
                                                    wbc->range_end);
        return err;
}

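/*
 * nilfs_writepage() below never writes the page out directly: in a
 * log-structured filesystem, a block can only reach disk as part of a
 * segment.  The page is therefore redirtied and handed back to the
 * segment constructor, which is run synchronously for WB_SYNC_ALL and
 * kicked asynchronously via nilfs_flush_segment() for reclaim.
 */
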
static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        int err;

        if (inode->i_sb->s_flags & MS_RDONLY) {
                /*
                 * It means that the filesystem was remounted in read-only
                 * mode because of an error or metadata corruption, but we
                 * still have dirty pages being flushed in the background.
                 * So, here we simply discard this dirty page.
                 */
                nilfs_clear_dirty_page(page, false);
                unlock_page(page);
                return -EROFS;
        }

        redirty_page_for_writepage(wbc, page);
        unlock_page(page);

        if (wbc->sync_mode == WB_SYNC_ALL) {
                err = nilfs_construct_segment(inode->i_sb);
                if (unlikely(err))
                        return err;
        } else if (wbc->for_reclaim)
                nilfs_flush_segment(inode->i_sb, inode->i_ino);

        return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
        int ret = __set_page_dirty_nobuffers(page);

        if (page_has_buffers(page)) {
                struct inode *inode = page->mapping->host;
                unsigned nr_dirty = 0;
                struct buffer_head *bh, *head;

                /*
                 * This page is locked by callers, and no other thread
                 * concurrently marks its buffers dirty since they are
                 * only dirtied through routines in fs/buffer.c, where
                 * the call sites of mark_buffer_dirty() are protected
                 * by the page lock.
                 */
                bh = head = page_buffers(page);
                do {
                        /* Do not mark hole blocks dirty */
                        if (buffer_dirty(bh) || !buffer_mapped(bh))
                                continue;

                        set_buffer_dirty(bh);
                        nr_dirty++;
                } while (bh = bh->b_this_page, bh != head);

                if (nr_dirty)
                        nilfs_set_file_dirty(inode, nr_dirty);
        }
        return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                nilfs_truncate(inode);
        }
}

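/*
 * A NILFS transaction is opened in nilfs_write_begin() and closed in
 * nilfs_write_end(); if block_write_begin() fails, the transaction is
 * aborted right away and any blocks instantiated beyond i_size are
 * trimmed through nilfs_write_failed() above.
 */
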
static int nilfs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

        if (unlikely(err))
                return err;

        err = block_write_begin(mapping, pos, len, flags, pagep,
                                nilfs_get_block);
        if (unlikely(err)) {
                nilfs_write_failed(mapping, pos + len);
                nilfs_transaction_abort(inode->i_sb);
        }
        return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
        unsigned nr_dirty;
        int err;

        nr_dirty = nilfs_page_count_clean_buffers(page, start,
                                                  start + copied);
        copied = generic_write_end(file, mapping, pos, len, copied, page,
                                   fsdata);
        nilfs_set_file_dirty(inode, nr_dirty);
        err = nilfs_transaction_commit(inode->i_sb);
        return err ? : copied;
}

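/*
 * Direct I/O is only honored for reads; returning 0 for a write makes
 * the generic write path fall back to buffered I/O, which keeps all
 * data writes under the control of the log writer.
 */
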
static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter,
                loff_t offset)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = file->f_mapping->host;
        size_t count = iov_iter_count(iter);
        ssize_t size;

        if (rw == WRITE)
                return 0;

        /* Needs synchronization with the cleaner */
        size = blockdev_direct_IO(rw, iocb, inode, iter, offset,
                                  nilfs_get_block);

        /*
         * In case of error, an extending write may have instantiated a
         * few blocks outside i_size.  Trim these off again.
         */
        if (unlikely((rw & WRITE) && size < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = offset + count;

                if (end > isize)
                        nilfs_write_failed(mapping, end);
        }

        return size;
}

const struct address_space_operations nilfs_aops = {
        .writepage              = nilfs_writepage,
        .readpage               = nilfs_readpage,
        .writepages             = nilfs_writepages,
        .set_page_dirty         = nilfs_set_page_dirty,
        .readpages              = nilfs_readpages,
        .write_begin            = nilfs_write_begin,
        .write_end              = nilfs_write_end,
        /* .releasepage         = nilfs_releasepage, */
        .invalidatepage         = block_invalidatepage,
        .direct_IO              = nilfs_direct_IO,
        .is_partially_uptodate  = block_is_partially_uptodate,
};

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
        struct super_block *sb = dir->i_sb;
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct inode *inode;
        struct nilfs_inode_info *ii;
        struct nilfs_root *root;
        int err = -ENOMEM;
        ino_t ino;

        inode = new_inode(sb);
        if (unlikely(!inode))
                goto failed;

        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

        root = NILFS_I(dir)->i_root;
        ii = NILFS_I(inode);
        ii->i_state = 1 << NILFS_I_NEW;
        ii->i_root = root;

        err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
        if (unlikely(err))
                goto failed_ifile_create_inode;
        /* reference count of i_bh inherits from nilfs_mdt_read_block() */

        atomic64_inc(&root->inodes_count);
        inode_init_owner(inode, dir, mode);
        inode->i_ino = ino;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
                err = nilfs_bmap_read(ii->i_bmap, NULL);
                if (err < 0)
                        goto failed_bmap;

                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }

        ii->i_flags = nilfs_mask_flags(
                mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

        /* ii->i_file_acl = 0; */
        /* ii->i_dir_acl = 0; */
        ii->i_dir_start_lookup = 0;
        nilfs_set_inode_flags(inode);
        spin_lock(&nilfs->ns_next_gen_lock);
        inode->i_generation = nilfs->ns_next_generation++;
        spin_unlock(&nilfs->ns_next_gen_lock);
        insert_inode_hash(inode);

        err = nilfs_init_acl(inode, dir);
        if (unlikely(err))
                goto failed_acl; /* never occurs.  When supporting
                                    nilfs_init_acl(), proper cancellation
                                    of the above jobs should be considered */

        return inode;

 failed_acl:
 failed_bmap:
        clear_nlink(inode);
        iput(inode);  /* raw_inode will be deleted through
                         generic_delete_inode() */
        goto failed;

 failed_ifile_create_inode:
        make_bad_inode(inode);
        iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
                         called */
 failed:
        return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = NILFS_I(inode)->i_flags;

        inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
                            S_DIRSYNC);
        if (flags & FS_SYNC_FL)
                inode->i_flags |= S_SYNC;
        if (flags & FS_APPEND_FL)
                inode->i_flags |= S_APPEND;
        if (flags & FS_IMMUTABLE_FL)
                inode->i_flags |= S_IMMUTABLE;
        if (flags & FS_NOATIME_FL)
                inode->i_flags |= S_NOATIME;
        if (flags & FS_DIRSYNC_FL)
                inode->i_flags |= S_DIRSYNC;
        mapping_set_gfp_mask(inode->i_mapping,
                             mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
                            struct nilfs_inode *raw_inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
        i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
        set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
        inode->i_size = le64_to_cpu(raw_inode->i_size);
        /* the on-disk inode carries no atime field; mirror mtime instead */
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        if (inode->i_nlink == 0 && inode->i_mode == 0)
                return -EINVAL; /* this inode is deleted */

        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
        ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        ii->i_dir_acl = S_ISREG(inode->i_mode) ?
                0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
        ii->i_dir_start_lookup = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);

        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)) {
                err = nilfs_bmap_read(ii->i_bmap, raw_inode);
                if (err < 0)
                        return err;
                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }
        return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
                              struct nilfs_root *root, unsigned long ino,
                              struct inode *inode)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct buffer_head *bh;
        struct nilfs_inode *raw_inode;
        int err;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
        if (unlikely(err))
                goto bad_inode;

        raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

        err = nilfs_read_inode_common(inode, raw_inode);
        if (err)
                goto failed_unmap;

        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &nilfs_file_inode_operations;
                inode->i_fop = &nilfs_file_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &nilfs_dir_inode_operations;
                inode->i_fop = &nilfs_dir_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_op = &nilfs_symlink_inode_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else {
                inode->i_op = &nilfs_special_inode_operations;
                init_special_inode(
                        inode, inode->i_mode,
                        huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
        }
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        nilfs_set_inode_flags(inode);
        return 0;

 failed_unmap:
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);

 bad_inode:
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;
        struct nilfs_inode_info *ii;

        if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
                return 0;

        ii = NILFS_I(inode);
        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                return !args->for_gc;

        return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;

        inode->i_ino = args->ino;
        if (args->for_gc) {
                NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
                NILFS_I(inode)->i_cno = args->cno;
                NILFS_I(inode)->i_root = NULL;
        } else {
                if (args->root && args->ino == NILFS_ROOT_INO)
                        nilfs_get_root(args->root);
                NILFS_I(inode)->i_root = args->root;
        }
        return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                            unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                                unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
                         unsigned long ino)
{
        struct inode *inode;
        int err;

        inode = nilfs_iget_locked(sb, root, ino);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = __nilfs_read_inode(sb, root, ino, inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

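/*
 * A minimal caller sketch (assuming a mounted checkpoint root):
 *
 *	struct inode *inode = nilfs_iget(sb, root, ino);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	...
 *	iput(inode);
 *
 * nilfs_iget() either returns a cached inode or reads it from the ifile
 * via __nilfs_read_inode() and publishes it with unlock_new_inode().
 */
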
struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                                __u64 cno)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
        };
        struct inode *inode;
        int err;

        inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = nilfs_init_gcinode(inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

void nilfs_write_inode_common(struct inode *inode,
                              struct nilfs_inode *raw_inode, int has_bmap)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
        raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le64(inode->i_size);
        raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
        raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);
        raw_inode->i_flags = cpu_to_le32(ii->i_flags);
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);

        if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
                struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

                /* zero-fill unused portion in the case of super root block */
                raw_inode->i_xattr = 0;
                raw_inode->i_pad = 0;
                memset((void *)raw_inode + sizeof(*raw_inode), 0,
                       nilfs->ns_inode_size - sizeof(*raw_inode));
        }

        if (has_bmap)
                nilfs_bmap_write(ii->i_bmap, raw_inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                raw_inode->i_device_code =
                        cpu_to_le64(huge_encode_dev(inode->i_rdev));
        /*
         * When extending the inode format, nilfs->ns_inode_size should
         * be checked before substituting the appended fields.
         */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
        ino_t ino = inode->i_ino;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct inode *ifile = ii->i_root->ifile;
        struct nilfs_inode *raw_inode;

        raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

        if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
                memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
        set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

        nilfs_write_inode_common(inode, raw_inode, 0);
        /*
         * XXX: calling with has_bmap = 0 is a workaround to avoid a
         * bmap deadlock; it delays the update of i_bmap until just
         * before writing.
         */
        nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS       16384  /* 64MB for 4KB block */

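/*
 * nilfs_truncate_bmap() removes blocks from the tail of the bmap in
 * chunks of at most NILFS_MAX_TRUNCATE_BLOCKS, calling
 * nilfs_relax_pressure_in_lock() between steps so the segment
 * constructor can make progress while a huge truncation is underway.
 */
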
static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
                                unsigned long from)
{
        unsigned long b;
        int ret;

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
 repeat:
        ret = nilfs_bmap_last_key(ii->i_bmap, &b);
        if (ret == -ENOENT)
                return;
        else if (ret < 0)
                goto failed;

        if (b < from)
                return;

        b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
        ret = nilfs_bmap_truncate(ii->i_bmap, b);
        nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
        if (!ret || (ret == -ENOMEM &&
                     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
                goto repeat;

 failed:
        nilfs_warning(ii->vfs_inode.i_sb, __func__,
                      "failed to truncate bmap (ino=%lu, err=%d)",
                      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
        unsigned long blkoff;
        unsigned int blocksize;
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;

        blocksize = sb->s_blocksize;
        blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

        nilfs_truncate_bmap(ii, blkoff);

        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);

        nilfs_mark_inode_dirty(inode);
        nilfs_set_file_dirty(inode, 0);
        nilfs_transaction_commit(sb);
        /*
         * May construct a logical segment and may fail in sync mode.
         * But truncate has no return value.
         */
}

static void nilfs_clear_inode(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        /*
         * Free resources allocated in nilfs_read_inode(), here.
         */
        BUG_ON(!list_empty(&ii->i_dirty));
        brelse(ii->i_bh);
        ii->i_bh = NULL;

        if (mdi && mdi->mi_palloc_cache)
                nilfs_palloc_destroy_cache(inode);

        if (test_bit(NILFS_I_BMAP, &ii->i_state))
                nilfs_bmap_clear(ii->i_bmap);

        nilfs_btnode_cache_clear(&ii->i_btnode_cache);

        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int ret;

        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
                truncate_inode_pages_final(&inode->i_data);
                clear_inode(inode);
                nilfs_clear_inode(inode);
                return;
        }
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        truncate_inode_pages_final(&inode->i_data);

        /* TODO: some of the following operations may fail. */
        nilfs_truncate_bmap(ii, 0);
        nilfs_mark_inode_dirty(inode);
        clear_inode(inode);

        ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
        if (!ret)
                atomic64_dec(&ii->i_root->inodes_count);

        nilfs_clear_inode(inode);

        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);
        nilfs_transaction_commit(sb);
        /*
         * May construct a logical segment and may fail in sync mode.
         * But delete_inode has no return value.
         */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct nilfs_transaction_info ti;
        struct inode *inode = dentry->d_inode;
        struct super_block *sb = inode->i_sb;
        int err;

        err = inode_change_ok(inode, iattr);
        if (err)
                return err;

        err = nilfs_transaction_begin(sb, &ti, 0);
        if (unlikely(err))
                return err;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                inode_dio_wait(inode);
                truncate_setsize(inode, iattr->ia_size);
                nilfs_truncate(inode);
        }

        setattr_copy(inode, iattr);
        mark_inode_dirty(inode);

        if (iattr->ia_valid & ATTR_MODE) {
                err = nilfs_acl_chmod(inode);
                if (unlikely(err))
                        goto out_err;
        }

        return nilfs_transaction_commit(sb);

 out_err:
        nilfs_transaction_abort(sb);
        return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        if ((mask & MAY_WRITE) && root &&
            root->cno != NILFS_CPTREE_CURRENT_CNO)
                return -EROFS; /* snapshot is not writable */

        return generic_permission(inode, mask);
}

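/*
 * nilfs_load_inode_block() uses double-checked locking: ns_inode_lock
 * is dropped around the (possibly blocking) ifile read, and ii->i_bh is
 * re-checked afterwards because another task may have installed the
 * buffer in the meantime.
 */
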
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        spin_lock(&nilfs->ns_inode_lock);
        if (ii->i_bh == NULL) {
                spin_unlock(&nilfs->ns_inode_lock);
                err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
                                                  inode->i_ino, pbh);
                if (unlikely(err))
                        return err;
                spin_lock(&nilfs->ns_inode_lock);
                if (ii->i_bh == NULL)
                        ii->i_bh = *pbh;
                else {
                        brelse(*pbh);
                        *pbh = ii->i_bh;
                }
        } else
                *pbh = ii->i_bh;

        get_bh(*pbh);
        spin_unlock(&nilfs->ns_inode_lock);
        return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        int ret = 0;

        if (!list_empty(&ii->i_dirty)) {
                spin_lock(&nilfs->ns_inode_lock);
                ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
                        test_bit(NILFS_I_BUSY, &ii->i_state);
                spin_unlock(&nilfs->ns_inode_lock);
        }
        return ret;
}

int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

        atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

        if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
                return 0;

        spin_lock(&nilfs->ns_inode_lock);
        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
                /*
                 * Because this routine may race with nilfs_dispose_list(),
                 * we have to check NILFS_I_QUEUED here, too.
                 */
                if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
                        /*
                         * This will happen when somebody is freeing
                         * this inode.
                         */
                        nilfs_warning(inode->i_sb, __func__,
                                      "cannot get inode (ino=%lu)\n",
                                      inode->i_ino);
                        spin_unlock(&nilfs->ns_inode_lock);
                        return -EINVAL; /* NILFS_I_DIRTY may remain for
                                           freeing inode */
                }
                list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
                set_bit(NILFS_I_QUEUED, &ii->i_state);
        }
        spin_unlock(&nilfs->ns_inode_lock);
        return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
        struct buffer_head *ibh;
        int err;

        err = nilfs_load_inode_block(inode, &ibh);
        if (unlikely(err)) {
                nilfs_warning(inode->i_sb, __func__,
                              "failed to reget inode block.\n");
                return err;
        }
        nilfs_update_inode(inode, ibh);
        mark_buffer_dirty(ibh);
        nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
        brelse(ibh);
        return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
        struct nilfs_transaction_info ti;
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        if (is_bad_inode(inode)) {
                nilfs_warning(inode->i_sb, __func__,
                              "tried to mark bad_inode dirty. ignored.\n");
                dump_stack();
                return;
        }
        if (mdi) {
                nilfs_mdt_mark_dirty(inode);
                return;
        }
        nilfs_transaction_begin(inode->i_sb, &ti, 0);
        nilfs_mark_inode_dirty(inode);
        nilfs_transaction_commit(inode->i_sb); /* never fails */
}

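/*
 * nilfs_fiemap() walks the file block space with
 * nilfs_bmap_lookup_contig(), merging contiguous runs into extents and
 * interleaving not-yet-committed (delayed allocation) ranges reported
 * by nilfs_find_uncommitted_extent().
 */
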
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 __u64 start, __u64 len)
{
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 logical = 0, phys = 0, size = 0;
        __u32 flags = 0;
        loff_t isize;
        sector_t blkoff, end_blkoff;
        sector_t delalloc_blkoff;
        unsigned long delalloc_blklen;
        unsigned int blkbits = inode->i_blkbits;
        int ret, n;

        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        mutex_lock(&inode->i_mutex);

        isize = i_size_read(inode);

        blkoff = start >> blkbits;
        end_blkoff = (start + len - 1) >> blkbits;

        delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
                                                        &delalloc_blkoff);

        do {
                __u64 blkphy;
                unsigned int maxblocks;

                if (delalloc_blklen && blkoff == delalloc_blkoff) {
                        if (size) {
                                /* End of the current extent */
                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                        }
                        if (blkoff > end_blkoff)
                                break;

                        flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
                        logical = blkoff << blkbits;
                        phys = 0;
                        size = delalloc_blklen << blkbits;

                        blkoff = delalloc_blkoff + delalloc_blklen;
                        delalloc_blklen = nilfs_find_uncommitted_extent(
                                inode, blkoff, &delalloc_blkoff);
                        continue;
                }

                /*
                 * Limit the number of blocks that we look up so as
                 * not to get into the next delayed allocation extent.
                 */
                maxblocks = INT_MAX;
                if (delalloc_blklen)
                        maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
                                          maxblocks);
                blkphy = 0;

                down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
                n = nilfs_bmap_lookup_contig(
                        NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
                up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

                if (n < 0) {
                        int past_eof;

                        if (unlikely(n != -ENOENT))
                                break; /* error */

                        /* HOLE */
                        blkoff++;
                        past_eof = ((blkoff << blkbits) >= isize);

                        if (size) {
                                /* End of the current extent */

                                if (past_eof)
                                        flags |= FIEMAP_EXTENT_LAST;

                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                                size = 0;
                        }
                        if (blkoff > end_blkoff || past_eof)
                                break;
                } else {
                        if (size) {
                                if (phys && blkphy << blkbits == phys + size) {
                                        /* The current extent goes on */
                                        size += n << blkbits;
                                } else {
                                        /* Terminate the current extent */
                                        ret = fiemap_fill_next_extent(
                                                fieinfo, logical, phys, size,
                                                flags);
                                        if (ret || blkoff > end_blkoff)
                                                break;

                                        /* Start another extent */
                                        flags = FIEMAP_EXTENT_MERGED;
                                        logical = blkoff << blkbits;
                                        phys = blkphy << blkbits;
                                        size = n << blkbits;
                                }
                        } else {
                                /* Start a new extent */
                                flags = FIEMAP_EXTENT_MERGED;
                                logical = blkoff << blkbits;
                                phys = blkphy << blkbits;
                                size = n << blkbits;
                        }
                        blkoff += n;
                }
                cond_resched();
        } while (true);

        /* If ret is 1 then we just hit the end of the extent array */
        if (ret == 1)
                ret = 0;

        mutex_unlock(&inode->i_mutex);
        return ret;
}