/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/aio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};
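
/*
 * Block accounting helpers: adjust the byte count behind i_blocks and the
 * per-checkpoint blocks_count of the attached nilfs_root together.
 */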

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *	been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, (unsigned long)blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
						      to proper value */
	} else if (ret == -ENOENT) {
		/* not found is not error (e.g. hole); must return without
		   the mapped state flag. */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}
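
/*
 * Data writeback is delegated to the log writer: a WB_SYNC_ALL request
 * triggers a data-sync segment covering the requested byte range, while
 * background writeback is left to the segment constructor.  On a read-only
 * remount after an error, dirty pages can no longer be written and are
 * simply discarded.
 */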

static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}
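
/*
 * A page handed to ->writepage() is never written out directly; it is
 * re-dirtied and flushed through whole-segment construction (synchronously
 * for WB_SYNC_ALL, via nilfs_flush_segment() when called for reclaim).
 */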

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		/*
		 * It means that filesystem was remounted in read-only
		 * mode because of error or metadata corruption. But we
		 * have dirty pages that try to be flushed in background.
		 * So, here we simply discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}

static int nilfs_set_page_dirty(struct page *page)
{
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		struct inode *inode = page->mapping->host;
		unsigned nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty since they are
		 * only dirtied through routines in fs/buffer.c in
		 * which call sites of mark_buffer_dirty are protected
		 * by page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}
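
/*
 * Each buffered write is wrapped in a NILFS transaction: nilfs_write_begin()
 * opens it, nilfs_write_end() commits it, and the error path aborts it after
 * rolling back any blocks instantiated beyond the old i_size.
 */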

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned start = pos & (PAGE_CACHE_SIZE - 1);
	unsigned nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
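
/*
 * Direct I/O is supported for reads only; a direct write returns 0 so that
 * the VFS falls back to buffered writing, which is then handled by the
 * segment constructor.
 */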

static ssize_t
nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
		loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file->f_mapping->host;
	ssize_t size;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	size = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
				  nilfs_get_block);

	/*
	 * In case of error extending write may have instantiated a few
	 * blocks outside i_size. Trim these off again.
	 */
	if (unlikely((rw & WRITE) && size < 0)) {
		loff_t isize = i_size_read(inode);
		loff_t end = offset + iov_length(iov, nr_segs);

		if (end > isize)
			nilfs_write_failed(mapping, end);
	}

	return size;
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate	= block_is_partially_uptodate,
};
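
/*
 * Create a new inode: allocate an entry in the ifile of the parent's
 * checkpoint, set up the in-core nilfs_inode_info, and inherit the
 * inheritable flags from the parent directory.
 */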

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_bmap;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	insert_inode_hash(inode);

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		goto failed_acl; /* never occur. When supporting
				    nilfs_init_acl(), proper cancellation of
				    above jobs should be considered */

	return inode;

 failed_acl:
 failed_bmap:
	clear_nlink(inode);
	iput(inode);  /* raw_inode will be deleted through
			 generic_delete_inode() */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
			 called */
 failed:
	return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;

	inode->i_flags &= ~(S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME |
			    S_DIRSYNC);
	if (flags & FS_SYNC_FL)
		inode->i_flags |= S_SYNC;
	if (flags & FS_APPEND_FL)
		inode->i_flags |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		inode->i_flags |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		inode->i_flags |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		inode->i_flags |= S_DIRSYNC;
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}

int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0 && inode->i_mode == 0)
		return -EINVAL; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}
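
/*
 * Inode cache callbacks for iget5_locked()/ilookup5(): inodes are keyed by
 * inode number plus the nilfs_root (mounted checkpoint) they belong to, and
 * GC inodes are additionally distinguished by checkpoint number.
 */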

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}

struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
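
/*
 * Obtain a GC inode: a private in-core inode the garbage collector uses to
 * stage blocks of inode @ino as of checkpoint @cno.  It is set up with
 * nilfs_init_gcinode() instead of being read from the ifile.
 */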

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/* When extending inode, nilfs->ns_inode_size should be checked
	   for substitutions of appended fields */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	set_bit(NILFS_I_INODE_DIRTY, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/* XXX: call with has_bmap = 0 is a workaround to avoid
		   deadlock of bmap. This delays update of i_bmap to just
		   before writing */
	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */
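
/*
 * Remove block mappings at and beyond @from, working backwards from the
 * last key in chunks of at most NILFS_MAX_TRUNCATE_BLOCKS and relaxing
 * lock pressure between the chunks.
 */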

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	unsigned long b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(unsigned long, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (mdi && mdi->mi_palloc_cache)
		nilfs_palloc_destroy_cache(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}
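
/*
 * ->evict_inode() callback: an inode that is still linked (or that has no
 * root, such as a GC inode) is only torn down in memory, while an unlinked
 * inode additionally has its blocks and its ifile entry released inside a
 * transaction.
 */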

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/* May construct a logical segment and may fail in sync mode.
	   But delete_inode has no return value. */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = dentry->d_inode;
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}
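
/*
 * Look up, and cache in ii->i_bh, the ifile block holding the on-disk inode,
 * returning it with an extra reference for the caller; ns_inode_lock guards
 * against two threads installing different buffers.
 */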

int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}
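
/*
 * Account @nr_dirty newly dirtied data blocks and, unless the inode is
 * already queued or busy, put it on the ns_dirty_files list so that the
 * segment constructor picks it up.
 */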

int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/* Because this routine may race with nilfs_dispose_list(),
		   we have to check NILFS_I_QUEUED here, too. */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/* This will happen when somebody is freeing
			   this inode. */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)\n",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /* NILFS_I_DIRTY may remain for
					   freeing inode */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_mark_inode_dirty(struct inode *inode)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.\n");
		return err;
	}
	nilfs_update_inode(inode, ibh);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.\n");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	nilfs_mark_inode_dirty(inode);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
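
/*
 * ->fiemap() callback: extents are built by walking the bmap block range,
 * merging contiguous mapped runs and reporting uncommitted ranges found in
 * the page cache as delayed-allocation extents.
 */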

int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}