inode.c

/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"
/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
	u64 ino;
	__u64 cno;
	struct nilfs_root *root;
	int for_gc;
};
static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
	if (root)
		atomic64_sub(n, &root->blocks_count);
}
/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
		    struct buffer_head *bh_result, int create)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 blknum = 0;
	int err = 0, ret;
	unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	if (ret >= 0) {	/* found */
		map_bh(bh_result, inode->i_sb, blknum);
		if (ret > 0)
			bh_result->b_size = (ret << inode->i_blkbits);
		goto out;
	}
	/* data block was not found */
	if (ret == -ENOENT && create) {
		struct nilfs_transaction_info ti;

		bh_result->b_blocknr = 0;
		err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
		if (unlikely(err))
			goto out;
		err = nilfs_bmap_insert(ii->i_bmap, blkoff,
					(unsigned long)bh_result);
		if (unlikely(err != 0)) {
			if (err == -EEXIST) {
				/*
				 * The get_block() function could be called
				 * from multiple callers for an inode.
				 * However, the page having this block must
				 * be locked in this case.
				 */
				printk(KERN_WARNING
				       "nilfs_get_block: a race condition "
				       "while inserting a data block. "
				       "(inode number=%lu, file block "
				       "offset=%llu)\n",
				       inode->i_ino,
				       (unsigned long long)blkoff);
				err = 0;
			}
			nilfs_transaction_abort(inode->i_sb);
			goto out;
		}
		nilfs_mark_inode_dirty_sync(inode);
		nilfs_transaction_commit(inode->i_sb); /* never fails */
		/* Error handling should be detailed */
		set_buffer_new(bh_result);
		set_buffer_delay(bh_result);
		map_bh(bh_result, inode->i_sb, 0);
		/* Disk block number must be changed to proper value */

	} else if (ret == -ENOENT) {
		/*
		 * not found is not error (e.g. hole); must return without
		 * the mapped state flag.
		 */
		;
	} else {
		err = ret;
	}

 out:
	return err;
}
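/*
 * Note on the create path above: a new block is only registered in the
 * bmap under a transaction; the buffer is then marked "new" and "delayed"
 * and mapped to block 0.  The real disk block number is assigned later,
 * when the segment constructor writes the block out (hence the reminder
 * that the disk block number must be changed to a proper value).
 */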
/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned int nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}
static int nilfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	int err = 0;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		nilfs_clear_dirty_pages(mapping, false);
		return -EROFS;
	}

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_dsync_segment(inode->i_sb, inode,
						    wbc->range_start,
						    wbc->range_end);
	return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	int err;

	if (inode->i_sb->s_flags & MS_RDONLY) {
		/*
		 * It means that filesystem was remounted in read-only
		 * mode because of error or metadata corruption. But we
		 * have dirty pages that try to be flushed in background.
		 * So, here we simply discard this dirty page.
		 */
		nilfs_clear_dirty_page(page, false);
		unlock_page(page);
		return -EROFS;
	}

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	if (wbc->sync_mode == WB_SYNC_ALL) {
		err = nilfs_construct_segment(inode->i_sb);
		if (unlikely(err))
			return err;
	} else if (wbc->for_reclaim)
		nilfs_flush_segment(inode->i_sb, inode->i_ino);

	return 0;
}
static int nilfs_set_page_dirty(struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = __set_page_dirty_nobuffers(page);

	if (page_has_buffers(page)) {
		unsigned int nr_dirty = 0;
		struct buffer_head *bh, *head;

		/*
		 * This page is locked by callers, and no other thread
		 * concurrently marks its buffers dirty since they are
		 * only dirtied through routines in fs/buffer.c in
		 * which call sites of mark_buffer_dirty are protected
		 * by page lock.
		 */
		bh = head = page_buffers(page);
		do {
			/* Do not mark hole blocks dirty */
			if (buffer_dirty(bh) || !buffer_mapped(bh))
				continue;

			set_buffer_dirty(bh);
			nr_dirty++;
		} while (bh = bh->b_this_page, bh != head);

		if (nr_dirty)
			nilfs_set_file_dirty(inode, nr_dirty);
	} else if (ret) {
		unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);

		nilfs_set_file_dirty(inode, nr_dirty);
	}
	return ret;
}
void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		nilfs_truncate(inode);
	}
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
			     loff_t pos, unsigned len, unsigned flags,
			     struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

	if (unlikely(err))
		return err;

	err = block_write_begin(mapping, pos, len, flags, pagep,
				nilfs_get_block);
	if (unlikely(err)) {
		nilfs_write_failed(mapping, pos + len);
		nilfs_transaction_abort(inode->i_sb);
	}
	return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len, unsigned copied,
			   struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;
	unsigned int start = pos & (PAGE_SIZE - 1);
	unsigned int nr_dirty;
	int err;

	nr_dirty = nilfs_page_count_clean_buffers(page, start,
						  start + copied);
	copied = generic_write_end(file, mapping, pos, len, copied, page,
				   fsdata);
	nilfs_set_file_dirty(inode, nr_dirty);
	err = nilfs_transaction_commit(inode->i_sb);
	return err ? : copied;
}
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (iov_iter_rw(iter) == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
	.writepage		= nilfs_writepage,
	.readpage		= nilfs_readpage,
	.writepages		= nilfs_writepages,
	.set_page_dirty		= nilfs_set_page_dirty,
	.readpages		= nilfs_readpages,
	.write_begin		= nilfs_write_begin,
	.write_end		= nilfs_write_end,
	/* .releasepage		= nilfs_releasepage, */
	.invalidatepage		= block_invalidatepage,
	.direct_IO		= nilfs_direct_IO,
	.is_partially_uptodate  = block_is_partially_uptodate,
};
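/*
 * Reads and read-ahead go through the mpage helpers with nilfs_get_block()
 * as the mapping callback.  nilfs_direct_IO() returns 0 for writes, so
 * O_DIRECT writes effectively fall back to the buffered write path; only
 * direct reads are handed to blockdev_direct_IO().
 */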
static int nilfs_insert_inode_locked(struct inode *inode,
				     struct nilfs_root *root,
				     unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}
struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
	struct super_block *sb = dir->i_sb;
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct inode *inode;
	struct nilfs_inode_info *ii;
	struct nilfs_root *root;
	int err = -ENOMEM;
	ino_t ino;

	inode = new_inode(sb);
	if (unlikely(!inode))
		goto failed;

	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

	root = NILFS_I(dir)->i_root;
	ii = NILFS_I(inode);
	ii->i_state = 1 << NILFS_I_NEW;
	ii->i_root = root;

	err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
	if (unlikely(err))
		goto failed_ifile_create_inode;
	/* reference count of i_bh inherits from nilfs_mdt_read_block() */

	atomic64_inc(&root->inodes_count);
	inode_init_owner(inode, dir, mode);
	inode->i_ino = ino;
	inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

	if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
		err = nilfs_bmap_read(ii->i_bmap, NULL);
		if (err < 0)
			goto failed_after_creation;

		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}

	ii->i_flags = nilfs_mask_flags(
		mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

	/* ii->i_file_acl = 0; */
	/* ii->i_dir_acl = 0; */
	ii->i_dir_start_lookup = 0;
	nilfs_set_inode_flags(inode);
	spin_lock(&nilfs->ns_next_gen_lock);
	inode->i_generation = nilfs->ns_next_generation++;
	spin_unlock(&nilfs->ns_next_gen_lock);
	if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
		err = -EIO;
		goto failed_after_creation;
	}

	err = nilfs_init_acl(inode, dir);
	if (unlikely(err))
		/*
		 * Never occur.  When supporting nilfs_init_acl(),
		 * proper cancellation of above jobs should be considered.
		 */
		goto failed_after_creation;

	return inode;

 failed_after_creation:
	clear_nlink(inode);
	unlock_new_inode(inode);
	iput(inode);  /*
		       * raw_inode will be deleted through
		       * nilfs_evict_inode().
		       */
	goto failed;

 failed_ifile_create_inode:
	make_bad_inode(inode);
	iput(inode);
 failed:
	return ERR_PTR(err);
}
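/*
 * Creation sequence used above, in outline: allocate an in-core inode,
 * create the on-disk entry in the ifile (keeping the inode block pinned
 * via ii->i_bh), set up the bmap for regular files, directories and
 * symlinks, inherit persistent flags from the parent, and finally hash
 * the inode with insert_inode_locked4() using the same comparison helper
 * as nilfs_iget(), so concurrent lookups find the new inode.
 */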
void nilfs_set_inode_flags(struct inode *inode)
{
	unsigned int flags = NILFS_I(inode)->i_flags;
	unsigned int new_fl = 0;

	if (flags & FS_SYNC_FL)
		new_fl |= S_SYNC;
	if (flags & FS_APPEND_FL)
		new_fl |= S_APPEND;
	if (flags & FS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (flags & FS_NOATIME_FL)
		new_fl |= S_NOATIME;
	if (flags & FS_DIRSYNC_FL)
		new_fl |= S_DIRSYNC;
	inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
			S_NOATIME | S_DIRSYNC);
}
int nilfs_read_inode_common(struct inode *inode,
			    struct nilfs_inode *raw_inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
	i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
	set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
	inode->i_size = le64_to_cpu(raw_inode->i_size);
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	if (inode->i_nlink == 0)
		return -ESTALE; /* this inode is deleted */

	inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
	ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
	ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
	ii->i_dir_acl = S_ISREG(inode->i_mode) ?
		0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
	ii->i_dir_start_lookup = 0;
	inode->i_generation = le32_to_cpu(raw_inode->i_generation);

	if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	    S_ISLNK(inode->i_mode)) {
		err = nilfs_bmap_read(ii->i_bmap, raw_inode);
		if (err < 0)
			return err;
		set_bit(NILFS_I_BMAP, &ii->i_state);
		/* No lock is needed; iget() ensures it. */
	}
	return 0;
}
static int __nilfs_read_inode(struct super_block *sb,
			      struct nilfs_root *root, unsigned long ino,
			      struct inode *inode)
{
	struct the_nilfs *nilfs = sb->s_fs_info;
	struct buffer_head *bh;
	struct nilfs_inode *raw_inode;
	int err;

	down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
	if (unlikely(err))
		goto bad_inode;

	raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

	err = nilfs_read_inode_common(inode, raw_inode);
	if (err)
		goto failed_unmap;

	if (S_ISREG(inode->i_mode)) {
		inode->i_op = &nilfs_file_inode_operations;
		inode->i_fop = &nilfs_file_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISDIR(inode->i_mode)) {
		inode->i_op = &nilfs_dir_inode_operations;
		inode->i_fop = &nilfs_dir_operations;
		inode->i_mapping->a_ops = &nilfs_aops;
	} else if (S_ISLNK(inode->i_mode)) {
		inode->i_op = &nilfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &nilfs_aops;
	} else {
		inode->i_op = &nilfs_special_inode_operations;
		init_special_inode(
			inode, inode->i_mode,
			huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
	}
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	nilfs_set_inode_flags(inode);
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
	return 0;

 failed_unmap:
	nilfs_ifile_unmap_inode(root->ifile, ino, bh);
	brelse(bh);

 bad_inode:
	up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
	return err;
}
static int nilfs_iget_test(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;
	struct nilfs_inode_info *ii;

	if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
		return 0;

	ii = NILFS_I(inode);
	if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
		return !args->for_gc;

	return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
	struct nilfs_iget_args *args = opaque;

	inode->i_ino = args->ino;
	if (args->for_gc) {
		NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
		NILFS_I(inode)->i_cno = args->cno;
		NILFS_I(inode)->i_root = NULL;
	} else {
		if (args->root && args->ino == NILFS_ROOT_INO)
			nilfs_get_root(args->root);
		NILFS_I(inode)->i_root = args->root;
	}
	return 0;
}
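/*
 * The inode cache key for NILFS is not just the inode number:
 * nilfs_iget_test() also compares the nilfs_root (i.e. which mounted
 * checkpoint the inode belongs to) and, for GC inodes, the checkpoint
 * number.  That is why the lookup helpers below use ilookup5() and
 * iget5_locked() with struct nilfs_iget_args rather than the plain
 * ino-keyed iget interfaces.
 */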
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
			    unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
				unsigned long ino)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = root, .cno = 0, .for_gc = 0
	};

	return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
			 unsigned long ino)
{
	struct inode *inode;
	int err;

	inode = nilfs_iget_locked(sb, root, ino);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = __nilfs_read_inode(sb, root, ino, inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
				__u64 cno)
{
	struct nilfs_iget_args args = {
		.ino = ino, .root = NULL, .cno = cno, .for_gc = 1
	};
	struct inode *inode;
	int err;

	inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
	if (unlikely(!inode))
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;

	err = nilfs_init_gcinode(inode);
	if (unlikely(err)) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);
	return inode;
}
void nilfs_write_inode_common(struct inode *inode,
			      struct nilfs_inode *raw_inode, int has_bmap)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	raw_inode->i_mode = cpu_to_le16(inode->i_mode);
	raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
	raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
	raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
	raw_inode->i_size = cpu_to_le64(inode->i_size);
	raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
	raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
	raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

	raw_inode->i_flags = cpu_to_le32(ii->i_flags);
	raw_inode->i_generation = cpu_to_le32(inode->i_generation);

	if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* zero-fill unused portion in the case of super root block */
		raw_inode->i_xattr = 0;
		raw_inode->i_pad = 0;
		memset((void *)raw_inode + sizeof(*raw_inode), 0,
		       nilfs->ns_inode_size - sizeof(*raw_inode));
	}

	if (has_bmap)
		nilfs_bmap_write(ii->i_bmap, raw_inode);
	else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
		raw_inode->i_device_code =
			cpu_to_le64(huge_encode_dev(inode->i_rdev));
	/*
	 * When extending inode, nilfs->ns_inode_size should be checked
	 * for substitutions of appended fields.
	 */
}
void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
	ino_t ino = inode->i_ino;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct inode *ifile = ii->i_root->ifile;
	struct nilfs_inode *raw_inode;

	raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

	if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
		memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
	if (flags & I_DIRTY_DATASYNC)
		set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

	nilfs_write_inode_common(inode, raw_inode, 0);
		/*
		 * XXX: call with has_bmap = 0 is a workaround to avoid
		 * deadlock of bmap.  This delays update of i_bmap to just
		 * before writing.
		 */

	nilfs_ifile_unmap_inode(ifile, ino, ibh);
}
#define NILFS_MAX_TRUNCATE_BLOCKS	16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
				unsigned long from)
{
	__u64 b;
	int ret;

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
 repeat:
	ret = nilfs_bmap_last_key(ii->i_bmap, &b);
	if (ret == -ENOENT)
		return;
	else if (ret < 0)
		goto failed;

	if (b < from)
		return;

	b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
	ret = nilfs_bmap_truncate(ii->i_bmap, b);
	nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
	if (!ret || (ret == -ENOMEM &&
		     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
		goto repeat;

 failed:
	nilfs_warning(ii->vfs_inode.i_sb, __func__,
		      "failed to truncate bmap (ino=%lu, err=%d)",
		      ii->vfs_inode.i_ino, ret);
}
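/*
 * nilfs_truncate_bmap() tears the bmap down from the last allocated key
 * toward @from, removing at most NILFS_MAX_TRUNCATE_BLOCKS blocks per
 * nilfs_bmap_truncate() call and calling nilfs_relax_pressure_in_lock()
 * between rounds, so a huge truncation is done in bounded steps; an
 * -ENOMEM from the bmap is retried once after relieving pressure.
 */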
void nilfs_truncate(struct inode *inode)
{
	unsigned long blkoff;
	unsigned int blocksize;
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
		return;
	if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
		return;

	blocksize = sb->s_blocksize;
	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

	nilfs_truncate_bmap(ii, blkoff);

	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);

	nilfs_mark_inode_dirty(inode);
	nilfs_set_file_dirty(inode, 0);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But truncate has no return value.
	 */
}
static void nilfs_clear_inode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	/*
	 * Free resources allocated in nilfs_read_inode(), here.
	 */
	BUG_ON(!list_empty(&ii->i_dirty));
	brelse(ii->i_bh);
	ii->i_bh = NULL;

	if (nilfs_is_metadata_file_inode(inode))
		nilfs_mdt_clear(inode);

	if (test_bit(NILFS_I_BMAP, &ii->i_state))
		nilfs_bmap_clear(ii->i_bmap);

	nilfs_btnode_cache_clear(&ii->i_btnode_cache);

	if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
		nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
	struct nilfs_transaction_info ti;
	struct super_block *sb = inode->i_sb;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int ret;

	if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
		truncate_inode_pages_final(&inode->i_data);
		clear_inode(inode);
		nilfs_clear_inode(inode);
		return;
	}
	nilfs_transaction_begin(sb, &ti, 0); /* never fails */

	truncate_inode_pages_final(&inode->i_data);

	/* TODO: some of the following operations may fail.  */
	nilfs_truncate_bmap(ii, 0);
	nilfs_mark_inode_dirty(inode);
	clear_inode(inode);

	ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
	if (!ret)
		atomic64_dec(&ii->i_root->inodes_count);

	nilfs_clear_inode(inode);

	if (IS_SYNC(inode))
		nilfs_set_transaction_flag(NILFS_TI_SYNC);
	nilfs_transaction_commit(sb);
	/*
	 * May construct a logical segment and may fail in sync mode.
	 * But delete_inode has no return value.
	 */
}
int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
	struct nilfs_transaction_info ti;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	int err;

	err = inode_change_ok(inode, iattr);
	if (err)
		return err;

	err = nilfs_transaction_begin(sb, &ti, 0);
	if (unlikely(err))
		return err;

	if ((iattr->ia_valid & ATTR_SIZE) &&
	    iattr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);
		truncate_setsize(inode, iattr->ia_size);
		nilfs_truncate(inode);
	}

	setattr_copy(inode, iattr);
	mark_inode_dirty(inode);

	if (iattr->ia_valid & ATTR_MODE) {
		err = nilfs_acl_chmod(inode);
		if (unlikely(err))
			goto out_err;
	}

	return nilfs_transaction_commit(sb);

out_err:
	nilfs_transaction_abort(sb);
	return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
	struct nilfs_root *root = NILFS_I(inode)->i_root;

	if ((mask & MAY_WRITE) && root &&
	    root->cno != NILFS_CPTREE_CURRENT_CNO)
		return -EROFS; /* snapshot is not writable */

	return generic_permission(inode, mask);
}
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	spin_lock(&nilfs->ns_inode_lock);
	if (ii->i_bh == NULL) {
		spin_unlock(&nilfs->ns_inode_lock);
		err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
						  inode->i_ino, pbh);
		if (unlikely(err))
			return err;
		spin_lock(&nilfs->ns_inode_lock);
		if (ii->i_bh == NULL)
			ii->i_bh = *pbh;
		else {
			brelse(*pbh);
			*pbh = ii->i_bh;
		}
	} else
		*pbh = ii->i_bh;

	get_bh(*pbh);
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	int ret = 0;

	if (!list_empty(&ii->i_dirty)) {
		spin_lock(&nilfs->ns_inode_lock);
		ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
			test_bit(NILFS_I_BUSY, &ii->i_state);
		spin_unlock(&nilfs->ns_inode_lock);
	}
	return ret;
}
int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

	atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

	if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
		return 0;

	spin_lock(&nilfs->ns_inode_lock);
	if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
	    !test_bit(NILFS_I_BUSY, &ii->i_state)) {
		/*
		 * Because this routine may race with nilfs_dispose_list(),
		 * we have to check NILFS_I_QUEUED here, too.
		 */
		if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
			/*
			 * This will happen when somebody is freeing
			 * this inode.
			 */
			nilfs_warning(inode->i_sb, __func__,
				      "cannot get inode (ino=%lu)",
				      inode->i_ino);
			spin_unlock(&nilfs->ns_inode_lock);
			return -EINVAL; /*
					 * NILFS_I_DIRTY may remain for
					 * freeing inode.
					 */
		}
		list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
		set_bit(NILFS_I_QUEUED, &ii->i_state);
	}
	spin_unlock(&nilfs->ns_inode_lock);
	return 0;
}
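/*
 * nilfs_set_file_dirty() accounts the number of newly dirtied blocks and,
 * on the first dirtying of an inode, grabs a reference with igrab() and
 * queues the inode on nilfs->ns_dirty_files so the segment constructor
 * will pick it up.  NILFS_I_QUEUED and NILFS_I_BUSY are rechecked under
 * ns_inode_lock because this path can race with nilfs_dispose_list().
 */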
int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
	struct buffer_head *ibh;
	int err;

	err = nilfs_load_inode_block(inode, &ibh);
	if (unlikely(err)) {
		nilfs_warning(inode->i_sb, __func__,
			      "failed to reget inode block.");
		return err;
	}
	nilfs_update_inode(inode, ibh, flags);
	mark_buffer_dirty(ibh);
	nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
	brelse(ibh);
	return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies data from a nilfs_inode to a corresponding inode
 * entry in the inode block.  This operation is excluded from the segment
 * construction.  This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
	struct nilfs_transaction_info ti;
	struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

	if (is_bad_inode(inode)) {
		nilfs_warning(inode->i_sb, __func__,
			      "tried to mark bad_inode dirty. ignored.");
		dump_stack();
		return;
	}
	if (mdi) {
		nilfs_mdt_mark_dirty(inode);
		return;
	}
	nilfs_transaction_begin(inode->i_sb, &ti, 0);
	__nilfs_mark_inode_dirty(inode, flags);
	nilfs_transaction_commit(inode->i_sb); /* never fails */
}
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		 __u64 start, __u64 len)
{
	struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
	__u64 logical = 0, phys = 0, size = 0;
	__u32 flags = 0;
	loff_t isize;
	sector_t blkoff, end_blkoff;
	sector_t delalloc_blkoff;
	unsigned long delalloc_blklen;
	unsigned int blkbits = inode->i_blkbits;
	int ret, n;

	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	inode_lock(inode);

	isize = i_size_read(inode);

	blkoff = start >> blkbits;
	end_blkoff = (start + len - 1) >> blkbits;

	delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
							&delalloc_blkoff);

	do {
		__u64 blkphy;
		unsigned int maxblocks;

		if (delalloc_blklen && blkoff == delalloc_blkoff) {
			if (size) {
				/* End of the current extent */
				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
			}
			if (blkoff > end_blkoff)
				break;

			flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
			logical = blkoff << blkbits;
			phys = 0;
			size = delalloc_blklen << blkbits;

			blkoff = delalloc_blkoff + delalloc_blklen;
			delalloc_blklen = nilfs_find_uncommitted_extent(
				inode, blkoff, &delalloc_blkoff);
			continue;
		}

		/*
		 * Limit the number of blocks that we look up so as
		 * not to get into the next delayed allocation extent.
		 */
		maxblocks = INT_MAX;
		if (delalloc_blklen)
			maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
					  maxblocks);
		blkphy = 0;

		down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
		n = nilfs_bmap_lookup_contig(
			NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
		up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

		if (n < 0) {
			int past_eof;

			if (unlikely(n != -ENOENT))
				break; /* error */

			/* HOLE */
			blkoff++;
			past_eof = ((blkoff << blkbits) >= isize);

			if (size) {
				/* End of the current extent */

				if (past_eof)
					flags |= FIEMAP_EXTENT_LAST;

				ret = fiemap_fill_next_extent(
					fieinfo, logical, phys, size, flags);
				if (ret)
					break;
				size = 0;
			}
			if (blkoff > end_blkoff || past_eof)
				break;
		} else {
			if (size) {
				if (phys && blkphy << blkbits == phys + size) {
					/* The current extent goes on */
					size += n << blkbits;
				} else {
					/* Terminate the current extent */
					ret = fiemap_fill_next_extent(
						fieinfo, logical, phys, size,
						flags);
					if (ret || blkoff > end_blkoff)
						break;

					/* Start another extent */
					flags = FIEMAP_EXTENT_MERGED;
					logical = blkoff << blkbits;
					phys = blkphy << blkbits;
					size = n << blkbits;
				}
			} else {
				/* Start a new extent */
				flags = FIEMAP_EXTENT_MERGED;
				logical = blkoff << blkbits;
				phys = blkphy << blkbits;
				size = n << blkbits;
			}
			blkoff += n;
		}
		cond_resched();
	} while (true);

	/* If ret is 1 then we just hit the end of the extent array */
	if (ret == 1)
		ret = 0;

	inode_unlock(inode);
	return ret;
}
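/*
 * nilfs_fiemap() reports three kinds of ranges as extents: uncommitted
 * (delayed-allocation) ranges found by nilfs_find_uncommitted_extent(),
 * contiguously mapped ranges found by nilfs_bmap_lookup_contig(), and
 * holes, which merely advance the cursor.  A return value of 1 from
 * fiemap_fill_next_extent() means the caller's extent array is full and
 * is treated as success.
 */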