/*
 * fs/f2fs/recovery.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 * http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include "f2fs.h"
#include "node.h"
#include "segment.h"

static struct kmem_cache *fsync_entry_slab;

bool space_for_roll_forward(struct f2fs_sb_info *sbi)
{
	if (sbi->last_valid_block_count + sbi->alloc_valid_block_count
			> sbi->user_block_count)
		return false;
	return true;
}

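/*
 * Worked example with illustrative numbers (not from a real device): for
 * user_block_count = 1000, last_valid_block_count = 900 and
 * alloc_valid_block_count = 80, we get 900 + 80 = 980 <= 1000, so there is
 * room to replay the fsynced updates. With alloc_valid_block_count = 120
 * the sum would exceed the volume (1020 > 1000) and roll-forward recovery
 * is refused rather than risk overcommitting blocks during replay.
 */
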
static struct fsync_inode_entry *get_fsync_inode(struct list_head *head,
						nid_t ino)
{
	struct list_head *this;
	struct fsync_inode_entry *entry;

	list_for_each(this, head) {
		entry = list_entry(this, struct fsync_inode_entry, list);
		if (entry->inode->i_ino == ino)
			return entry;
	}
	return NULL;
}

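/*
 * The list is expected to stay short (one entry per inode fsynced since the
 * last checkpoint), so a linear scan keyed on the inode number suffices.
 * Both recovery passes below rely on this lookup: find_fsync_dnodes() uses
 * it to decide whether an inode is already tracked, and recover_data() uses
 * it to map each dnode back to its owning in-memory inode.
 */
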
static int recover_dentry(struct page *ipage, struct inode *inode)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(ipage);
	nid_t pino = le32_to_cpu(raw_inode->i_pino);
	struct f2fs_dir_entry *de;
	struct qstr name;
	struct page *page;
	struct inode *dir, *einode;
	int err = 0;

	/* reuse the parent directory if it is already tracked as dirty */
	dir = check_dirty_dir_inode(F2FS_SB(inode->i_sb), pino);
	if (!dir) {
		dir = f2fs_iget(inode->i_sb, pino);
		if (IS_ERR(dir)) {
			err = PTR_ERR(dir);
			goto out;
		}
		set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT);
		add_dirty_dir_inode(dir);
	}

	name.len = le32_to_cpu(raw_inode->i_namelen);
	name.name = raw_inode->i_name;

	if (unlikely(name.len > F2FS_NAME_LEN)) {
		WARN_ON(1);
		err = -ENAMETOOLONG;
		goto out;
	}
retry:
	de = f2fs_find_entry(dir, &name, &page);
	if (de && inode->i_ino == le32_to_cpu(de->ino))
		goto out_unmap_put;
	if (de) {
		/*
		 * The name points at a stale inode: orphan it, drop the
		 * entry and retry until the recovered link can be added.
		 */
		einode = f2fs_iget(inode->i_sb, le32_to_cpu(de->ino));
		if (IS_ERR(einode)) {
			WARN_ON(1);
			if (PTR_ERR(einode) == -ENOENT)
				err = -EEXIST;
			goto out_unmap_put;
		}
		err = acquire_orphan_inode(F2FS_SB(inode->i_sb));
		if (err) {
			iput(einode);
			goto out_unmap_put;
		}
		f2fs_delete_entry(de, page, einode);
		iput(einode);
		goto retry;
	}
	err = __f2fs_add_link(dir, &name, inode);
	goto out;

out_unmap_put:
	kunmap(page);
	f2fs_put_page(page, 0);
out:
	f2fs_msg(inode->i_sb, KERN_NOTICE,
			"%s: ino = %x, name = %s, dir = %lx, err = %d",
			__func__, ino_of_node(ipage), raw_inode->i_name,
			IS_ERR(dir) ? 0 : dir->i_ino, err);
	return err;
}

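/*
 * Note on the retry loop above: f2fs_delete_entry() releases the found
 * dentry page, so after removing a conflicting entry the directory must be
 * searched again from scratch. The loop terminates because each pass either
 * finds the correct ino (done), deletes one stale entry, or finds no entry
 * at all and falls through to __f2fs_add_link().
 */
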
static int recover_inode(struct inode *inode, struct page *node_page)
{
	struct f2fs_inode *raw_inode = F2FS_INODE(node_page);

	if (!IS_INODE(node_page))
		return 0;

	inode->i_mode = le16_to_cpu(raw_inode->i_mode);
	i_size_write(inode, le64_to_cpu(raw_inode->i_size));
	inode->i_atime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_ctime.tv_sec = le64_to_cpu(raw_inode->i_ctime);
	inode->i_mtime.tv_sec = le64_to_cpu(raw_inode->i_mtime);
	inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
	inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
	inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);

	if (is_dent_dnode(node_page))
		return recover_dentry(node_page, inode);

	f2fs_msg(inode->i_sb, KERN_NOTICE, "recover_inode: ino = %x, name = %s",
			ino_of_node(node_page), raw_inode->i_name);
	return 0;
}

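/*
 * recover_inode() only restores the attributes a fsync can change: mode,
 * size and timestamps. Note that i_atime is filled from the on-disk
 * i_mtime pair here; the dentry itself is replayed separately through
 * recover_dentry() when the node page is a dentry dnode.
 */
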
static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	block_t blkaddr;
	int err = 0;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, CURSEG_WARM_NODE);
	blkaddr = START_BLOCK(sbi, curseg->segno) + curseg->next_blkoff;

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;

	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		if (!is_fsync_dnode(page))
			goto next;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (entry) {
			if (IS_INODE(page) && is_dent_dnode(page))
				set_inode_flag(F2FS_I(entry->inode),
							FI_INC_LINK);
		} else {
			if (IS_INODE(page) && is_dent_dnode(page)) {
				err = recover_inode_page(sbi, page);
				if (err)
					break;
			}

			/* add this fsync inode to the list */
			entry = kmem_cache_alloc(fsync_entry_slab, GFP_NOFS);
			if (!entry) {
				err = -ENOMEM;
				break;
			}

			entry->inode = f2fs_iget(sbi->sb, ino_of_node(page));
			if (IS_ERR(entry->inode)) {
				err = PTR_ERR(entry->inode);
				kmem_cache_free(fsync_entry_slab, entry);
				break;
			}
			list_add_tail(&entry->list, head);
		}
		entry->blkaddr = blkaddr;

		err = recover_inode(entry->inode, page);
		if (err && err != -ENOENT)
			break;
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}

	unlock_page(page);
	__free_pages(page, 0);

	return err;
}

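/*
 * The fsynced dnodes form a singly linked chain in the warm node log,
 * starting at the first free block of CURSEG_WARM_NODE (illustrative
 * layout; the chain need not be physically contiguous):
 *
 *	blkaddr:   N          N+1        N+2
 *	         [dnode] -> [dnode] -> [dnode] -> ...
 *
 * next_blkaddr_of_node() yields the next link, and the walk stops as soon
 * as cpver_of_node() disagrees with the current checkpoint version, i.e.
 * at the first block that was not written after the last checkpoint.
 */
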
static void destroy_fsync_dnodes(struct list_head *head)
{
	struct fsync_inode_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, head, list) {
		iput(entry->inode);
		list_del(&entry->list);
		kmem_cache_free(fsync_entry_slab, entry);
	}
}

static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi,
					block_t blkaddr, struct dnode_of_data *dn)
{
	struct seg_entry *sentry;
	unsigned int segno = GET_SEGNO(sbi, blkaddr);
	unsigned short blkoff = GET_SEGOFF_FROM_SEG0(sbi, blkaddr) &
					(sbi->blocks_per_seg - 1);
	struct f2fs_summary sum;
	nid_t ino, nid;
	void *kaddr;
	struct inode *inode;
	struct page *node_page;
	unsigned int offset;
	block_t bidx;
	int i;

	sentry = get_seg_entry(sbi, segno);
	if (!f2fs_test_bit(blkoff, sentry->cur_valid_map))
		return 0;

	/* Get the previous summary */
	for (i = CURSEG_WARM_DATA; i <= CURSEG_COLD_DATA; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);
		if (curseg->segno == segno) {
			sum = curseg->sum_blk->entries[blkoff];
			break;
		}
	}
	if (i > CURSEG_COLD_DATA) {
		struct page *sum_page = get_sum_page(sbi, segno);
		struct f2fs_summary_block *sum_node;
		kaddr = page_address(sum_page);
		sum_node = (struct f2fs_summary_block *)kaddr;
		sum = sum_node->entries[blkoff];
		f2fs_put_page(sum_page, 1);
	}

	/* Use the locked dnode page and inode */
	nid = le32_to_cpu(sum.nid);
	if (dn->inode->i_ino == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.nid = nid;
		tdn.node_page = dn->inode_page;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	} else if (dn->nid == nid) {
		struct dnode_of_data tdn = *dn;
		tdn.ofs_in_node = le16_to_cpu(sum.ofs_in_node);
		truncate_data_blocks_range(&tdn, 1);
		return 0;
	}

	/* Get the node page */
	node_page = get_node_page(sbi, nid);
	if (IS_ERR(node_page))
		return PTR_ERR(node_page);

	offset = ofs_of_node(node_page);
	ino = ino_of_node(node_page);
	f2fs_put_page(node_page, 1);

	/* Deallocate previous index in the node page */
	inode = f2fs_iget(sbi->sb, ino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	bidx = start_bidx_of_node(offset, F2FS_I(inode)) +
					le16_to_cpu(sum.ofs_in_node);

	truncate_hole(inode, bidx, bidx + 1);
	iput(inode);
	return 0;
}

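/*
 * Summary of the cases above: the target block only needs work if the SIT
 * bitmap marks it valid; its previous owner is then found through the
 * segment summary (the in-memory summaries of the data cursegs first, the
 * on-disk summary block otherwise). If the owner is the node currently
 * being recovered, the stale index is truncated in place; otherwise the
 * owning inode is loaded and a one-block hole is punched at the old index,
 * so no two node offsets end up referencing the same data block.
 */
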
static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
					struct page *page, block_t blkaddr)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	unsigned int start, end;
	struct dnode_of_data dn;
	struct f2fs_summary sum;
	struct node_info ni;
	int err = 0, recovered = 0;

	if (recover_inline_data(inode, page))
		goto out;

	start = start_bidx_of_node(ofs_of_node(page), fi);
	if (IS_INODE(page))
		end = start + ADDRS_PER_INODE(fi);
	else
		end = start + ADDRS_PER_BLOCK;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);

	err = get_dnode_of_data(&dn, start, ALLOC_NODE);
	if (err) {
		f2fs_unlock_op(sbi);
		goto out;
	}

	wait_on_page_writeback(dn.node_page);

	get_node_info(sbi, dn.nid, &ni);
	f2fs_bug_on(ni.ino != ino_of_node(page));
	f2fs_bug_on(ofs_of_node(dn.node_page) != ofs_of_node(page));

	for (; start < end; start++) {
		block_t src, dest;

		src = datablock_addr(dn.node_page, dn.ofs_in_node);
		dest = datablock_addr(page, dn.ofs_in_node);

		if (src != dest && dest != NEW_ADDR && dest != NULL_ADDR) {
			if (src == NULL_ADDR) {
				err = reserve_new_block(&dn);
				/* We should not get -ENOSPC */
				f2fs_bug_on(err);
			}

			/* Check the previous node page having this index */
			err = check_index_in_prev_nodes(sbi, dest, &dn);
			if (err)
				goto err;

			set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);

			/* write dummy data page */
			recover_data_page(sbi, NULL, &sum, src, dest);
			update_extent_cache(dest, &dn);
			recovered++;
		}
		dn.ofs_in_node++;
	}

	/* write node page in place */
	set_summary(&sum, dn.nid, 0, 0);
	if (IS_INODE(dn.node_page))
		sync_inode_page(&dn);

	copy_node_footer(dn.node_page, page);
	fill_node_footer(dn.node_page, dn.nid, ni.ino,
					ofs_of_node(page), false);
	set_page_dirty(dn.node_page);

	recover_node_page(sbi, dn.node_page, &sum, &ni, blkaddr);
err:
	f2fs_put_dnode(&dn);
	f2fs_unlock_op(sbi);
out:
	f2fs_msg(sbi->sb, KERN_NOTICE,
		"recover_data: ino = %lx, recovered = %d blocks, err = %d",
		inode->i_ino, recovered, err);
	return err;
}

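/*
 * Per-index decision in the loop above (src = address currently in the
 * on-disk node, dest = address recorded by the fsynced dnode):
 *
 *	src == dest                   -> nothing to do
 *	dest == NULL_ADDR or NEW_ADDR -> nothing to replay
 *	src == NULL_ADDR, dest valid  -> reserve a block, then recover
 *	src != dest, both valid       -> invalidate the stale owner of dest
 *	                                 first, then recover in place
 *
 * recover_data_page() revalidates the logged address with the new summary
 * entry, and the dnode itself is rewritten in place afterwards via
 * recover_node_page().
 */
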
static int recover_data(struct f2fs_sb_info *sbi,
				struct list_head *head, int type)
{
	unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi));
	struct curseg_info *curseg;
	struct page *page;
	int err = 0;
	block_t blkaddr;

	/* get node pages in the current segment */
	curseg = CURSEG_I(sbi, type);
	blkaddr = NEXT_FREE_BLKADDR(sbi, curseg);

	/* read node page */
	page = alloc_page(GFP_F2FS_ZERO);
	if (!page)
		return -ENOMEM;

	lock_page(page);

	while (1) {
		struct fsync_inode_entry *entry;

		err = f2fs_submit_page_bio(sbi, page, blkaddr, READ_SYNC);
		if (err)
			return err;

		lock_page(page);

		if (cp_ver != cpver_of_node(page))
			break;

		entry = get_fsync_inode(head, ino_of_node(page));
		if (!entry)
			goto next;

		err = do_recover_data(sbi, entry->inode, page, blkaddr);
		if (err)
			break;

		if (entry->blkaddr == blkaddr) {
			iput(entry->inode);
			list_del(&entry->list);
			kmem_cache_free(fsync_entry_slab, entry);
		}
next:
		/* check next segment */
		blkaddr = next_blkaddr_of_node(page);
	}

	unlock_page(page);
	__free_pages(page, 0);

	if (!err)
		allocate_new_segments(sbi);
	return err;
}

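/*
 * Entry point for roll-forward recovery, called at mount time: pass #1
 * (find_fsync_dnodes) walks the warm node chain and collects every inode
 * that was fsynced after the last checkpoint; pass #2 (recover_data) walks
 * the same chain again and replays the data block addresses of those
 * inodes. If anything was replayed, a checkpoint is written so that the
 * recovered state becomes durable and the chain is never replayed twice.
 */
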
int recover_fsync_data(struct f2fs_sb_info *sbi)
{
	struct list_head inode_list;
	int err;
	bool need_writecp = false;

	fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
			sizeof(struct fsync_inode_entry), NULL);
	if (!fsync_entry_slab)
		return -ENOMEM;

	INIT_LIST_HEAD(&inode_list);

	/* step #1: find fsynced inode numbers */
	sbi->por_doing = true;
	err = find_fsync_dnodes(sbi, &inode_list);
	if (err)
		goto out;

	if (list_empty(&inode_list))
		goto out;

	need_writecp = true;

	/* step #2: recover data */
	err = recover_data(sbi, &inode_list, CURSEG_WARM_NODE);
	f2fs_bug_on(!list_empty(&inode_list));
out:
	destroy_fsync_dnodes(&inode_list);
	kmem_cache_destroy(fsync_entry_slab);
	sbi->por_doing = false;
	if (!err && need_writecp)
		write_checkpoint(sbi, false);
	return err;
}