/*
 * inode.c - NILFS inode operations.
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 *
 */

#include <linux/buffer_head.h>
#include <linux/gfp.h>
#include <linux/mpage.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/uio.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"
#include "cpfile.h"
#include "ifile.h"

/**
 * struct nilfs_iget_args - arguments used during comparison between inodes
 * @ino: inode number
 * @cno: checkpoint number
 * @root: pointer on NILFS root object (mounted checkpoint)
 * @for_gc: inode for GC flag
 */
struct nilfs_iget_args {
        u64 ino;
        __u64 cno;
        struct nilfs_root *root;
        int for_gc;
};

static int nilfs_iget_test(struct inode *inode, void *opaque);

void nilfs_inode_add_blocks(struct inode *inode, int n)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
        if (root)
                atomic64_add(n, &root->blocks_count);
}

void nilfs_inode_sub_blocks(struct inode *inode, int n)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
        if (root)
                atomic64_sub(n, &root->blocks_count);
}

/**
 * nilfs_get_block() - get a file block on the filesystem (callback function)
 * @inode - inode struct of the target file
 * @blkoff - file block number
 * @bh_result - buffer head to be mapped on
 * @create - indicate whether allocating the block or not when it has not
 *      been allocated yet.
 *
 * This function does not issue actual read request of the specified data
 * block. It is done by VFS.
 */
int nilfs_get_block(struct inode *inode, sector_t blkoff,
                    struct buffer_head *bh_result, int create)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 blknum = 0;
        int err = 0, ret;
        unsigned maxblocks = bh_result->b_size >> inode->i_blkbits;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        if (ret >= 0) { /* found */
                map_bh(bh_result, inode->i_sb, blknum);
                if (ret > 0)
                        bh_result->b_size = (ret << inode->i_blkbits);
                goto out;
        }
        /* data block was not found */
        if (ret == -ENOENT && create) {
                struct nilfs_transaction_info ti;

                bh_result->b_blocknr = 0;
                err = nilfs_transaction_begin(inode->i_sb, &ti, 1);
                if (unlikely(err))
                        goto out;
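                /*
                 * Delayed allocation: insert a placeholder for this block
                 * into the bmap now; the real disk block number is assigned
                 * later, when the segment constructor writes the block out
                 * (see the buffer_delay flag set below).
                 */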
                err = nilfs_bmap_insert(ii->i_bmap, blkoff,
                                        (unsigned long)bh_result);
                if (unlikely(err != 0)) {
                        if (err == -EEXIST) {
                                /*
                                 * The get_block() function could be called
                                 * from multiple callers for an inode.
                                 * However, the page having this block must
                                 * be locked in this case.
                                 */
                                printk(KERN_WARNING
                                       "nilfs_get_block: a race condition "
                                       "while inserting a data block. "
                                       "(inode number=%lu, file block "
                                       "offset=%llu)\n",
                                       inode->i_ino,
                                       (unsigned long long)blkoff);
                                err = 0;
                        }
                        nilfs_transaction_abort(inode->i_sb);
                        goto out;
                }
                nilfs_mark_inode_dirty_sync(inode);
                nilfs_transaction_commit(inode->i_sb); /* never fails */
                /* Error handling should be detailed */
                set_buffer_new(bh_result);
                set_buffer_delay(bh_result);
                map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed
                                                      to proper value */
        } else if (ret == -ENOENT) {
                /* not found is not error (e.g. hole); must return without
                   the mapped state flag. */
                ;
        } else {
                err = ret;
        }

 out:
        return err;
}

/**
 * nilfs_readpage() - implement readpage() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @page - the page to be read
 */
static int nilfs_readpage(struct file *file, struct page *page)
{
        return mpage_readpage(page, nilfs_get_block);
}

/**
 * nilfs_readpages() - implement readpages() method of nilfs_aops {}
 * address_space_operations.
 * @file - file struct of the file to be read
 * @mapping - address_space struct used for reading multiple pages
 * @pages - the pages to be read
 * @nr_pages - number of pages to be read
 */
static int nilfs_readpages(struct file *file, struct address_space *mapping,
                           struct list_head *pages, unsigned nr_pages)
{
        return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block);
}

static int nilfs_writepages(struct address_space *mapping,
                            struct writeback_control *wbc)
{
        struct inode *inode = mapping->host;
        int err = 0;

        if (inode->i_sb->s_flags & MS_RDONLY) {
                nilfs_clear_dirty_pages(mapping, false);
                return -EROFS;
        }

        if (wbc->sync_mode == WB_SYNC_ALL)
                err = nilfs_construct_dsync_segment(inode->i_sb, inode,
                                                    wbc->range_start,
                                                    wbc->range_end);
        return err;
}

static int nilfs_writepage(struct page *page, struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        int err;

        if (inode->i_sb->s_flags & MS_RDONLY) {
                /*
                 * It means that filesystem was remounted in read-only
                 * mode because of error or metadata corruption. But we
                 * have dirty pages that try to be flushed in background.
                 * So, here we simply discard this dirty page.
                 */
                nilfs_clear_dirty_page(page, false);
                unlock_page(page);
                return -EROFS;
        }

        redirty_page_for_writepage(wbc, page);
        unlock_page(page);

        if (wbc->sync_mode == WB_SYNC_ALL) {
                err = nilfs_construct_segment(inode->i_sb);
                if (unlikely(err))
                        return err;
        } else if (wbc->for_reclaim)
                nilfs_flush_segment(inode->i_sb, inode->i_ino);

        return 0;
}

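/*
 * Besides marking the page dirty, nilfs_set_page_dirty() counts the
 * blocks that actually become dirty and reports them through
 * nilfs_set_file_dirty(), which queues the inode for the segment
 * constructor.
 */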
static int nilfs_set_page_dirty(struct page *page)
{
        struct inode *inode = page->mapping->host;
        int ret = __set_page_dirty_nobuffers(page);

        if (page_has_buffers(page)) {
                unsigned nr_dirty = 0;
                struct buffer_head *bh, *head;

                /*
                 * This page is locked by callers, and no other thread
                 * concurrently marks its buffers dirty since they are
                 * only dirtied through routines in fs/buffer.c in
                 * which call sites of mark_buffer_dirty are protected
                 * by page lock.
                 */
                bh = head = page_buffers(page);
                do {
                        /* Do not mark hole blocks dirty */
                        if (buffer_dirty(bh) || !buffer_mapped(bh))
                                continue;

                        set_buffer_dirty(bh);
                        nr_dirty++;
                } while (bh = bh->b_this_page, bh != head);

                if (nr_dirty)
                        nilfs_set_file_dirty(inode, nr_dirty);
        } else if (ret) {
                unsigned nr_dirty = 1 << (PAGE_CACHE_SHIFT - inode->i_blkbits);

                nilfs_set_file_dirty(inode, nr_dirty);
        }
        return ret;
}

void nilfs_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                nilfs_truncate(inode);
        }
}

static int nilfs_write_begin(struct file *file, struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned flags,
                             struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int err = nilfs_transaction_begin(inode->i_sb, NULL, 1);

        if (unlikely(err))
                return err;

        err = block_write_begin(mapping, pos, len, flags, pagep,
                                nilfs_get_block);
        if (unlikely(err)) {
                nilfs_write_failed(mapping, pos + len);
                nilfs_transaction_abort(inode->i_sb);
        }
        return err;
}

static int nilfs_write_end(struct file *file, struct address_space *mapping,
                           loff_t pos, unsigned len, unsigned copied,
                           struct page *page, void *fsdata)
{
        struct inode *inode = mapping->host;
        unsigned start = pos & (PAGE_CACHE_SIZE - 1);
        unsigned nr_dirty;
        int err;

        nr_dirty = nilfs_page_count_clean_buffers(page, start,
                                                  start + copied);
        copied = generic_write_end(file, mapping, pos, len, copied, page,
                                   fsdata);
        nilfs_set_file_dirty(inode, nr_dirty);
        err = nilfs_transaction_commit(inode->i_sb);
        return err ? : copied;
}

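/*
 * Direct writes are not supported: returning 0 for a WRITE request makes
 * the VFS fall back to buffered writes, which go through the segment
 * constructor like any other write.
 */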
static ssize_t
nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset)
{
        struct inode *inode = file_inode(iocb->ki_filp);

        if (iov_iter_rw(iter) == WRITE)
                return 0;

        /* Needs synchronization with the cleaner */
        return blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block);
}

const struct address_space_operations nilfs_aops = {
        .writepage              = nilfs_writepage,
        .readpage               = nilfs_readpage,
        .writepages             = nilfs_writepages,
        .set_page_dirty         = nilfs_set_page_dirty,
        .readpages              = nilfs_readpages,
        .write_begin            = nilfs_write_begin,
        .write_end              = nilfs_write_end,
        /* .releasepage         = nilfs_releasepage, */
        .invalidatepage         = block_invalidatepage,
        .direct_IO              = nilfs_direct_IO,
        .is_partially_uptodate  = block_is_partially_uptodate,
};

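/*
 * Inodes are hashed by more than the inode number: the comparison
 * callback (nilfs_iget_test) also matches the nilfs_root, so the same
 * inode number can coexist for different checkpoints or snapshots.
 */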
static int nilfs_insert_inode_locked(struct inode *inode,
                                     struct nilfs_root *root,
                                     unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
{
        struct super_block *sb = dir->i_sb;
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct inode *inode;
        struct nilfs_inode_info *ii;
        struct nilfs_root *root;
        int err = -ENOMEM;
        ino_t ino;

        inode = new_inode(sb);
        if (unlikely(!inode))
                goto failed;

        mapping_set_gfp_mask(inode->i_mapping,
                           mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));

        root = NILFS_I(dir)->i_root;
        ii = NILFS_I(inode);
        ii->i_state = 1 << NILFS_I_NEW;
        ii->i_root = root;

        err = nilfs_ifile_create_inode(root->ifile, &ino, &ii->i_bh);
        if (unlikely(err))
                goto failed_ifile_create_inode;
        /* reference count of i_bh inherits from nilfs_mdt_read_block() */

        atomic64_inc(&root->inodes_count);
        inode_init_owner(inode, dir, mode);
        inode->i_ino = ino;
        inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;

        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
                err = nilfs_bmap_read(ii->i_bmap, NULL);
                if (err < 0)
                        goto failed_after_creation;

                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }

        ii->i_flags = nilfs_mask_flags(
                mode, NILFS_I(dir)->i_flags & NILFS_FL_INHERITED);

        /* ii->i_file_acl = 0; */
        /* ii->i_dir_acl = 0; */
        ii->i_dir_start_lookup = 0;
        nilfs_set_inode_flags(inode);
        spin_lock(&nilfs->ns_next_gen_lock);
        inode->i_generation = nilfs->ns_next_generation++;
        spin_unlock(&nilfs->ns_next_gen_lock);
        if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
                err = -EIO;
                goto failed_after_creation;
        }

        err = nilfs_init_acl(inode, dir);
        if (unlikely(err))
                goto failed_after_creation; /* never occur. When supporting
                                               nilfs_init_acl(), proper
                                               cancellation of above jobs
                                               should be considered */

        return inode;

 failed_after_creation:
        clear_nlink(inode);
        unlock_new_inode(inode);
        iput(inode);  /* raw_inode will be deleted through
                         nilfs_evict_inode() */
        goto failed;

 failed_ifile_create_inode:
        make_bad_inode(inode);
        iput(inode);  /* if i_nlink == 1, generic_forget_inode() will be
                         called */
 failed:
        return ERR_PTR(err);
}

void nilfs_set_inode_flags(struct inode *inode)
{
        unsigned int flags = NILFS_I(inode)->i_flags;
        unsigned int new_fl = 0;

        if (flags & FS_SYNC_FL)
                new_fl |= S_SYNC;
        if (flags & FS_APPEND_FL)
                new_fl |= S_APPEND;
        if (flags & FS_IMMUTABLE_FL)
                new_fl |= S_IMMUTABLE;
        if (flags & FS_NOATIME_FL)
                new_fl |= S_NOATIME;
        if (flags & FS_DIRSYNC_FL)
                new_fl |= S_DIRSYNC;
        inode_set_flags(inode, new_fl, S_SYNC | S_APPEND | S_IMMUTABLE |
                        S_NOATIME | S_DIRSYNC);
}

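/*
 * Note that the on-disk nilfs_inode has no atime field; i_atime is
 * restored from the on-disk mtime when the inode is read in below.
 */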
int nilfs_read_inode_common(struct inode *inode,
                            struct nilfs_inode *raw_inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        inode->i_mode = le16_to_cpu(raw_inode->i_mode);
        i_uid_write(inode, le32_to_cpu(raw_inode->i_uid));
        i_gid_write(inode, le32_to_cpu(raw_inode->i_gid));
        set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
        inode->i_size = le64_to_cpu(raw_inode->i_size);
        inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
        inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        if (inode->i_nlink == 0)
                return -ESTALE; /* this inode is deleted */

        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
#if 0
        ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
        ii->i_dir_acl = S_ISREG(inode->i_mode) ?
                0 : le32_to_cpu(raw_inode->i_dir_acl);
#endif
        ii->i_dir_start_lookup = 0;
        inode->i_generation = le32_to_cpu(raw_inode->i_generation);

        if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
            S_ISLNK(inode->i_mode)) {
                err = nilfs_bmap_read(ii->i_bmap, raw_inode);
                if (err < 0)
                        return err;
                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
        }
        return 0;
}

static int __nilfs_read_inode(struct super_block *sb,
                              struct nilfs_root *root, unsigned long ino,
                              struct inode *inode)
{
        struct the_nilfs *nilfs = sb->s_fs_info;
        struct buffer_head *bh;
        struct nilfs_inode *raw_inode;
        int err;

        down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        err = nilfs_ifile_get_inode_block(root->ifile, ino, &bh);
        if (unlikely(err))
                goto bad_inode;

        raw_inode = nilfs_ifile_map_inode(root->ifile, ino, bh);

        err = nilfs_read_inode_common(inode, raw_inode);
        if (err)
                goto failed_unmap;

        if (S_ISREG(inode->i_mode)) {
                inode->i_op = &nilfs_file_inode_operations;
                inode->i_fop = &nilfs_file_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISDIR(inode->i_mode)) {
                inode->i_op = &nilfs_dir_inode_operations;
                inode->i_fop = &nilfs_dir_operations;
                inode->i_mapping->a_ops = &nilfs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_op = &nilfs_symlink_inode_operations;
                inode_nohighmem(inode);
                inode->i_mapping->a_ops = &nilfs_aops;
        } else {
                inode->i_op = &nilfs_special_inode_operations;
                init_special_inode(
                        inode, inode->i_mode,
                        huge_decode_dev(le64_to_cpu(raw_inode->i_device_code)));
        }
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        nilfs_set_inode_flags(inode);
        mapping_set_gfp_mask(inode->i_mapping,
                           mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
        return 0;

 failed_unmap:
        nilfs_ifile_unmap_inode(root->ifile, ino, bh);
        brelse(bh);

 bad_inode:
        up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
        return err;
}

static int nilfs_iget_test(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;
        struct nilfs_inode_info *ii;

        if (args->ino != inode->i_ino || args->root != NILFS_I(inode)->i_root)
                return 0;

        ii = NILFS_I(inode);
        if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
                return !args->for_gc;

        return args->for_gc && args->cno == ii->i_cno;
}

static int nilfs_iget_set(struct inode *inode, void *opaque)
{
        struct nilfs_iget_args *args = opaque;

        inode->i_ino = args->ino;
        if (args->for_gc) {
                NILFS_I(inode)->i_state = 1 << NILFS_I_GCINODE;
                NILFS_I(inode)->i_cno = args->cno;
                NILFS_I(inode)->i_root = NULL;
        } else {
                if (args->root && args->ino == NILFS_ROOT_INO)
                        nilfs_get_root(args->root);
                NILFS_I(inode)->i_root = args->root;
        }
        return 0;
}

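/*
 * Three lookup helpers follow: nilfs_ilookup() only searches the inode
 * cache, nilfs_iget_locked() finds or allocates a hashed inode, and
 * nilfs_iget() additionally reads a freshly allocated inode in from the
 * ifile.
 */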
struct inode *nilfs_ilookup(struct super_block *sb, struct nilfs_root *root,
                            unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return ilookup5(sb, ino, nilfs_iget_test, &args);
}

struct inode *nilfs_iget_locked(struct super_block *sb, struct nilfs_root *root,
                                unsigned long ino)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = root, .cno = 0, .for_gc = 0
        };

        return iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
}

struct inode *nilfs_iget(struct super_block *sb, struct nilfs_root *root,
                         unsigned long ino)
{
        struct inode *inode;
        int err;

        inode = nilfs_iget_locked(sb, root, ino);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = __nilfs_read_inode(sb, root, ino, inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
                                __u64 cno)
{
        struct nilfs_iget_args args = {
                .ino = ino, .root = NULL, .cno = cno, .for_gc = 1
        };
        struct inode *inode;
        int err;

        inode = iget5_locked(sb, ino, nilfs_iget_test, nilfs_iget_set, &args);
        if (unlikely(!inode))
                return ERR_PTR(-ENOMEM);
        if (!(inode->i_state & I_NEW))
                return inode;

        err = nilfs_init_gcinode(inode);
        if (unlikely(err)) {
                iget_failed(inode);
                return ERR_PTR(err);
        }
        unlock_new_inode(inode);
        return inode;
}

void nilfs_write_inode_common(struct inode *inode,
                              struct nilfs_inode *raw_inode, int has_bmap)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);

        raw_inode->i_mode = cpu_to_le16(inode->i_mode);
        raw_inode->i_uid = cpu_to_le32(i_uid_read(inode));
        raw_inode->i_gid = cpu_to_le32(i_gid_read(inode));
        raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
        raw_inode->i_size = cpu_to_le64(inode->i_size);
        raw_inode->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec);
        raw_inode->i_mtime = cpu_to_le64(inode->i_mtime.tv_sec);
        raw_inode->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
        raw_inode->i_mtime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        raw_inode->i_blocks = cpu_to_le64(inode->i_blocks);

        raw_inode->i_flags = cpu_to_le32(ii->i_flags);
        raw_inode->i_generation = cpu_to_le32(inode->i_generation);

        if (NILFS_ROOT_METADATA_FILE(inode->i_ino)) {
                struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

                /* zero-fill unused portion in the case of super root block */
                raw_inode->i_xattr = 0;
                raw_inode->i_pad = 0;
                memset((void *)raw_inode + sizeof(*raw_inode), 0,
                       nilfs->ns_inode_size - sizeof(*raw_inode));
        }

        if (has_bmap)
                nilfs_bmap_write(ii->i_bmap, raw_inode);
        else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
                raw_inode->i_device_code =
                        cpu_to_le64(huge_encode_dev(inode->i_rdev));
        /* When extending inode, nilfs->ns_inode_size should be checked
           for substitutions of appended fields */
}

void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags)
{
        ino_t ino = inode->i_ino;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct inode *ifile = ii->i_root->ifile;
        struct nilfs_inode *raw_inode;

        raw_inode = nilfs_ifile_map_inode(ifile, ino, ibh);

        if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
                memset(raw_inode, 0, NILFS_MDT(ifile)->mi_entry_size);
        if (flags & I_DIRTY_DATASYNC)
                set_bit(NILFS_I_INODE_SYNC, &ii->i_state);

        nilfs_write_inode_common(inode, raw_inode, 0);
                /* XXX: call with has_bmap = 0 is a workaround to avoid
                   deadlock of bmap. This delays update of i_bmap to just
                   before writing */
        nilfs_ifile_unmap_inode(ifile, ino, ibh);
}

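/*
 * nilfs_truncate_bmap() works from the tail of the file toward @from in
 * bounded chunks (NILFS_MAX_TRUNCATE_BLOCKS at a time), calling
 * nilfs_relax_pressure_in_lock() between chunks so that the segment
 * constructor can make progress while a large truncation is running.
 */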
#define NILFS_MAX_TRUNCATE_BLOCKS       16384  /* 64MB for 4KB block */

static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
                                unsigned long from)
{
        __u64 b;
        int ret;

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
repeat:
        ret = nilfs_bmap_last_key(ii->i_bmap, &b);
        if (ret == -ENOENT)
                return;
        else if (ret < 0)
                goto failed;

        if (b < from)
                return;

        b -= min_t(__u64, NILFS_MAX_TRUNCATE_BLOCKS, b - from);
        ret = nilfs_bmap_truncate(ii->i_bmap, b);
        nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
        if (!ret || (ret == -ENOMEM &&
                     nilfs_bmap_truncate(ii->i_bmap, b) == 0))
                goto repeat;

failed:
        nilfs_warning(ii->vfs_inode.i_sb, __func__,
                      "failed to truncate bmap (ino=%lu, err=%d)",
                      ii->vfs_inode.i_ino, ret);
}

void nilfs_truncate(struct inode *inode)
{
        unsigned long blkoff;
        unsigned int blocksize;
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);

        if (!test_bit(NILFS_I_BMAP, &ii->i_state))
                return;
        if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
                return;

        blocksize = sb->s_blocksize;
        blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);

        nilfs_truncate_bmap(ii, blkoff);

        inode->i_mtime = inode->i_ctime = CURRENT_TIME;
        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);

        nilfs_mark_inode_dirty(inode);
        nilfs_set_file_dirty(inode, 0);
        nilfs_transaction_commit(sb);
        /* May construct a logical segment and may fail in sync mode.
           But truncate has no return value. */
}

static void nilfs_clear_inode(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        /*
         * Free resources allocated in nilfs_read_inode(), here.
         */
        BUG_ON(!list_empty(&ii->i_dirty));
        brelse(ii->i_bh);
        ii->i_bh = NULL;

        if (mdi && mdi->mi_palloc_cache)
                nilfs_palloc_destroy_cache(inode);

        if (test_bit(NILFS_I_BMAP, &ii->i_state))
                nilfs_bmap_clear(ii->i_bmap);

        nilfs_btnode_cache_clear(&ii->i_btnode_cache);

        if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
                nilfs_put_root(ii->i_root);
}

void nilfs_evict_inode(struct inode *inode)
{
        struct nilfs_transaction_info ti;
        struct super_block *sb = inode->i_sb;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int ret;

        if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
                truncate_inode_pages_final(&inode->i_data);
                clear_inode(inode);
                nilfs_clear_inode(inode);
                return;
        }
        nilfs_transaction_begin(sb, &ti, 0); /* never fails */

        truncate_inode_pages_final(&inode->i_data);

        /* TODO: some of the following operations may fail. */
        nilfs_truncate_bmap(ii, 0);
        nilfs_mark_inode_dirty(inode);
        clear_inode(inode);

        ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
        if (!ret)
                atomic64_dec(&ii->i_root->inodes_count);

        nilfs_clear_inode(inode);

        if (IS_SYNC(inode))
                nilfs_set_transaction_flag(NILFS_TI_SYNC);
        nilfs_transaction_commit(sb);
        /* May construct a logical segment and may fail in sync mode.
           But delete_inode has no return value. */
}

int nilfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
        struct nilfs_transaction_info ti;
        struct inode *inode = d_inode(dentry);
        struct super_block *sb = inode->i_sb;
        int err;

        err = inode_change_ok(inode, iattr);
        if (err)
                return err;

        err = nilfs_transaction_begin(sb, &ti, 0);
        if (unlikely(err))
                return err;

        if ((iattr->ia_valid & ATTR_SIZE) &&
            iattr->ia_size != i_size_read(inode)) {
                inode_dio_wait(inode);
                truncate_setsize(inode, iattr->ia_size);
                nilfs_truncate(inode);
        }

        setattr_copy(inode, iattr);
        mark_inode_dirty(inode);

        if (iattr->ia_valid & ATTR_MODE) {
                err = nilfs_acl_chmod(inode);
                if (unlikely(err))
                        goto out_err;
        }

        return nilfs_transaction_commit(sb);

out_err:
        nilfs_transaction_abort(sb);
        return err;
}

int nilfs_permission(struct inode *inode, int mask)
{
        struct nilfs_root *root = NILFS_I(inode)->i_root;

        if ((mask & MAY_WRITE) && root &&
            root->cno != NILFS_CPTREE_CURRENT_CNO)
                return -EROFS; /* snapshot is not writable */

        return generic_permission(inode, mask);
}

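/*
 * nilfs_load_inode_block() caches the buffer head of the inode's ifile
 * block in ii->i_bh using double-checked locking under ns_inode_lock.
 * The caller gets an extra reference via get_bh() and must release it
 * with brelse().
 */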
int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh)
{
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        struct nilfs_inode_info *ii = NILFS_I(inode);
        int err;

        spin_lock(&nilfs->ns_inode_lock);
        if (ii->i_bh == NULL) {
                spin_unlock(&nilfs->ns_inode_lock);
                err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
                                                  inode->i_ino, pbh);
                if (unlikely(err))
                        return err;
                spin_lock(&nilfs->ns_inode_lock);
                if (ii->i_bh == NULL)
                        ii->i_bh = *pbh;
                else {
                        brelse(*pbh);
                        *pbh = ii->i_bh;
                }
        } else
                *pbh = ii->i_bh;

        get_bh(*pbh);
        spin_unlock(&nilfs->ns_inode_lock);
        return 0;
}

int nilfs_inode_dirty(struct inode *inode)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        int ret = 0;

        if (!list_empty(&ii->i_dirty)) {
                spin_lock(&nilfs->ns_inode_lock);
                ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
                        test_bit(NILFS_I_BUSY, &ii->i_state);
                spin_unlock(&nilfs->ns_inode_lock);
        }
        return ret;
}

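/*
 * nilfs_set_file_dirty() adds @nr_dirty blocks to the global dirty-block
 * counter and, on the first dirtying of the inode, takes a reference with
 * igrab() and queues the inode on ns_dirty_files for the segment
 * constructor.
 */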
int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty)
{
        struct nilfs_inode_info *ii = NILFS_I(inode);
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

        atomic_add(nr_dirty, &nilfs->ns_ndirtyblks);

        if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
                return 0;

        spin_lock(&nilfs->ns_inode_lock);
        if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
            !test_bit(NILFS_I_BUSY, &ii->i_state)) {
                /* Because this routine may race with nilfs_dispose_list(),
                   we have to check NILFS_I_QUEUED here, too. */
                if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
                        /* This will happen when somebody is freeing
                           this inode. */
                        nilfs_warning(inode->i_sb, __func__,
                                      "cannot get inode (ino=%lu)\n",
                                      inode->i_ino);
                        spin_unlock(&nilfs->ns_inode_lock);
                        return -EINVAL; /* NILFS_I_DIRTY may remain for
                                           freeing inode */
                }
                list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
                set_bit(NILFS_I_QUEUED, &ii->i_state);
        }
        spin_unlock(&nilfs->ns_inode_lock);
        return 0;
}

int __nilfs_mark_inode_dirty(struct inode *inode, int flags)
{
        struct buffer_head *ibh;
        int err;

        err = nilfs_load_inode_block(inode, &ibh);
        if (unlikely(err)) {
                nilfs_warning(inode->i_sb, __func__,
                              "failed to reget inode block.\n");
                return err;
        }
        nilfs_update_inode(inode, ibh, flags);
        mark_buffer_dirty(ibh);
        nilfs_mdt_mark_dirty(NILFS_I(inode)->i_root->ifile);
        brelse(ibh);
        return 0;
}

/**
 * nilfs_dirty_inode - reflect changes on given inode to an inode block.
 * @inode: inode of the file to be registered.
 * @flags: flags to determine the dirty state of the inode
 *
 * nilfs_dirty_inode() loads an inode block containing the specified
 * @inode and copies the in-memory inode into the corresponding entry in
 * the inode block. This operation is excluded from the segment
 * construction. This function can be called both as a single operation
 * and as a part of indivisible file operations.
 */
void nilfs_dirty_inode(struct inode *inode, int flags)
{
        struct nilfs_transaction_info ti;
        struct nilfs_mdt_info *mdi = NILFS_MDT(inode);

        if (is_bad_inode(inode)) {
                nilfs_warning(inode->i_sb, __func__,
                              "tried to mark bad_inode dirty. ignored.\n");
                dump_stack();
                return;
        }
        if (mdi) {
                nilfs_mdt_mark_dirty(inode);
                return;
        }
        nilfs_transaction_begin(inode->i_sb, &ti, 0);
        __nilfs_mark_inode_dirty(inode, flags);
        nilfs_transaction_commit(inode->i_sb); /* never fails */
}

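/*
 * nilfs_fiemap() reports two kinds of extents: uncommitted
 * (delayed-allocation) extents found in the page cache via
 * nilfs_find_uncommitted_extent(), flagged FIEMAP_EXTENT_DELALLOC, and
 * committed extents looked up in the bmap.
 */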
int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                 __u64 start, __u64 len)
{
        struct the_nilfs *nilfs = inode->i_sb->s_fs_info;
        __u64 logical = 0, phys = 0, size = 0;
        __u32 flags = 0;
        loff_t isize;
        sector_t blkoff, end_blkoff;
        sector_t delalloc_blkoff;
        unsigned long delalloc_blklen;
        unsigned int blkbits = inode->i_blkbits;
        int ret, n;

        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        inode_lock(inode);

        isize = i_size_read(inode);

        blkoff = start >> blkbits;
        end_blkoff = (start + len - 1) >> blkbits;

        delalloc_blklen = nilfs_find_uncommitted_extent(inode, blkoff,
                                                        &delalloc_blkoff);

        do {
                __u64 blkphy;
                unsigned int maxblocks;

                if (delalloc_blklen && blkoff == delalloc_blkoff) {
                        if (size) {
                                /* End of the current extent */
                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                        }
                        if (blkoff > end_blkoff)
                                break;

                        flags = FIEMAP_EXTENT_MERGED | FIEMAP_EXTENT_DELALLOC;
                        logical = blkoff << blkbits;
                        phys = 0;
                        size = delalloc_blklen << blkbits;

                        blkoff = delalloc_blkoff + delalloc_blklen;
                        delalloc_blklen = nilfs_find_uncommitted_extent(
                                inode, blkoff, &delalloc_blkoff);
                        continue;
                }

                /*
                 * Limit the number of blocks that we look up so as
                 * not to get into the next delayed allocation extent.
                 */
                maxblocks = INT_MAX;
                if (delalloc_blklen)
                        maxblocks = min_t(sector_t, delalloc_blkoff - blkoff,
                                          maxblocks);
                blkphy = 0;

                down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);
                n = nilfs_bmap_lookup_contig(
                        NILFS_I(inode)->i_bmap, blkoff, &blkphy, maxblocks);
                up_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem);

                if (n < 0) {
                        int past_eof;

                        if (unlikely(n != -ENOENT))
                                break; /* error */

                        /* HOLE */
                        blkoff++;
                        past_eof = ((blkoff << blkbits) >= isize);

                        if (size) {
                                /* End of the current extent */

                                if (past_eof)
                                        flags |= FIEMAP_EXTENT_LAST;

                                ret = fiemap_fill_next_extent(
                                        fieinfo, logical, phys, size, flags);
                                if (ret)
                                        break;
                                size = 0;
                        }
                        if (blkoff > end_blkoff || past_eof)
                                break;
                } else {
                        if (size) {
                                if (phys && blkphy << blkbits == phys + size) {
                                        /* The current extent goes on */
                                        size += n << blkbits;
                                } else {
                                        /* Terminate the current extent */
                                        ret = fiemap_fill_next_extent(
                                                fieinfo, logical, phys, size,
                                                flags);
                                        if (ret || blkoff > end_blkoff)
                                                break;

                                        /* Start another extent */
                                        flags = FIEMAP_EXTENT_MERGED;
                                        logical = blkoff << blkbits;
                                        phys = blkphy << blkbits;
                                        size = n << blkbits;
                                }
                        } else {
                                /* Start a new extent */
                                flags = FIEMAP_EXTENT_MERGED;
                                logical = blkoff << blkbits;
                                phys = blkphy << blkbits;
                                size = n << blkbits;
                        }
                        blkoff += n;
                }
                cond_resched();
        } while (true);

        /* If ret is 1 then we just hit the end of the extent array */
        if (ret == 1)
                ret = 0;

        inode_unlock(inode);
        return ret;
}