/*
 * recovery.c - NILFS recovery logic
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "sufile.h"
#include "page.h"
#include "segbuf.h"
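
/*
 * Overview: nilfs_search_super_root() walks the logs written after the
 * position recorded in the superblock to find the latest valid super root,
 * and nilfs_salvage_orphan_logs() rolls forward the data-sync logs written
 * after that super root so their blocks are bound to a new checkpoint.
 */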

/*
 * Segment check result
 */
enum {
	NILFS_SEG_VALID,
	NILFS_SEG_NO_SUPER_ROOT,
	NILFS_SEG_FAIL_IO,
	NILFS_SEG_FAIL_MAGIC,
	NILFS_SEG_FAIL_SEQ,
	NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
	NILFS_SEG_FAIL_CHECKSUM_FULL,
	NILFS_SEG_FAIL_CONSISTENCY,
};

/* work structure for recovery */
struct nilfs_recovery_block {
	ino_t ino;		/*
				 * Inode number of the file that this block
				 * belongs to
				 */
	sector_t blocknr;	/* block number */
	__u64 vblocknr;		/* virtual block number */
	unsigned long blkoff;	/* File offset of the data block (per block) */
	struct list_head list;
};
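
/*
 * nilfs_warn_segment_error - print a warning for a segment check result
 * and convert it to a negative error code (-EIO for I/O failures,
 * -EINVAL otherwise).
 */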
static int nilfs_warn_segment_error(int err)
{
	switch (err) {
	case NILFS_SEG_FAIL_IO:
		printk(KERN_WARNING
		       "NILFS warning: I/O error on loading last segment\n");
		return -EIO;
	case NILFS_SEG_FAIL_MAGIC:
		printk(KERN_WARNING
		       "NILFS warning: Segment magic number invalid\n");
		break;
	case NILFS_SEG_FAIL_SEQ:
		printk(KERN_WARNING
		       "NILFS warning: Sequence number mismatch\n");
		break;
	case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
		printk(KERN_WARNING
		       "NILFS warning: Checksum error in super root\n");
		break;
	case NILFS_SEG_FAIL_CHECKSUM_FULL:
		printk(KERN_WARNING
		       "NILFS warning: Checksum error in segment payload\n");
		break;
	case NILFS_SEG_FAIL_CONSISTENCY:
		printk(KERN_WARNING
		       "NILFS warning: Inconsistent segment\n");
		break;
	case NILFS_SEG_NO_SUPER_ROOT:
		printk(KERN_WARNING
		       "NILFS warning: No super root in the last segment\n");
		break;
	}
	return -EINVAL;
}

/**
 * nilfs_compute_checksum - compute checksum of blocks continuously
 * @nilfs: nilfs object
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: DBN of start block
 * @nblock: number of blocks to be checked
 */
static int nilfs_compute_checksum(struct the_nilfs *nilfs,
				  struct buffer_head *bhs, u32 *sum,
				  unsigned long offset, u64 check_bytes,
				  sector_t start, unsigned long nblock)
{
	unsigned int blocksize = nilfs->ns_blocksize;
	unsigned long size;
	u32 crc;

	BUG_ON(offset >= blocksize);
	check_bytes -= offset;
	size = min_t(u64, check_bytes, blocksize - offset);
	crc = crc32_le(nilfs->ns_crc_seed,
		       (unsigned char *)bhs->b_data + offset, size);
	if (--nblock > 0) {
		do {
			struct buffer_head *bh;

			bh = __bread(nilfs->ns_bdev, ++start, blocksize);
			if (!bh)
				return -EIO;
			check_bytes -= size;
			size = min_t(u64, check_bytes, blocksize);
			crc = crc32_le(crc, bh->b_data, size);
			brelse(bh);
		} while (--nblock > 0);
	}
	*sum = crc;
	return 0;
}

/**
 * nilfs_read_super_root_block - read super root block
 * @nilfs: nilfs object
 * @sr_block: disk block number of the super root block
 * @pbh: address of a buffer_head pointer to return super root buffer
 * @check: CRC check flag
 */
int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
				struct buffer_head **pbh, int check)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *sr;
	u32 crc;
	int ret;

	*pbh = NULL;
	bh_sr = __bread(nilfs->ns_bdev, sr_block, nilfs->ns_blocksize);
	if (unlikely(!bh_sr)) {
		ret = NILFS_SEG_FAIL_IO;
		goto failed;
	}

	sr = (struct nilfs_super_root *)bh_sr->b_data;
	if (check) {
		unsigned int bytes = le16_to_cpu(sr->sr_bytes);

		if (bytes == 0 || bytes > nilfs->ns_blocksize) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
		if (nilfs_compute_checksum(
			    nilfs, bh_sr, &crc, sizeof(sr->sr_sum), bytes,
			    sr_block, 1)) {
			ret = NILFS_SEG_FAIL_IO;
			goto failed_bh;
		}
		if (crc != le32_to_cpu(sr->sr_sum)) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
	}
	*pbh = bh_sr;
	return 0;

 failed_bh:
	brelse(bh_sr);

 failed:
	return nilfs_warn_segment_error(ret);
}

/**
 * nilfs_read_log_header - read summary header of the specified log
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: pointer to return segment summary structure
 */
static struct buffer_head *
nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
		      struct nilfs_segment_summary **sum)
{
	struct buffer_head *bh_sum;

	bh_sum = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
	if (bh_sum)
		*sum = (struct nilfs_segment_summary *)bh_sum->b_data;
	return bh_sum;
}

/**
 * nilfs_validate_log - verify consistency of log
 * @nilfs: nilfs object
 * @seg_seq: sequence number of segment
 * @bh_sum: buffer head of summary block
 * @sum: segment summary struct
 */
static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
			      struct buffer_head *bh_sum,
			      struct nilfs_segment_summary *sum)
{
	unsigned long nblock;
	u32 crc;
	int ret;

	ret = NILFS_SEG_FAIL_MAGIC;
	if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC)
		goto out;

	ret = NILFS_SEG_FAIL_SEQ;
	if (le64_to_cpu(sum->ss_seq) != seg_seq)
		goto out;

	nblock = le32_to_cpu(sum->ss_nblocks);
	ret = NILFS_SEG_FAIL_CONSISTENCY;
	if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment))
		/* This limits the number of blocks read in the CRC check */
		goto out;

	ret = NILFS_SEG_FAIL_IO;
	if (nilfs_compute_checksum(nilfs, bh_sum, &crc, sizeof(sum->ss_datasum),
				   ((u64)nblock << nilfs->ns_blocksize_bits),
				   bh_sum->b_blocknr, nblock))
		goto out;

	ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
	if (crc != le32_to_cpu(sum->ss_datasum))
		goto out;
	ret = 0;
out:
	return ret;
}

/**
 * nilfs_read_summary_info - read an item on summary blocks of a log
 * @nilfs: nilfs object
 * @pbh: the current buffer head on summary blocks [in, out]
 * @offset: the current byte offset on summary blocks [in, out]
 * @bytes: byte size of the item to be read
 */
static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
				     struct buffer_head **pbh,
				     unsigned int *offset, unsigned int bytes)
{
	void *ptr;
	sector_t blocknr;

	BUG_ON((*pbh)->b_size < *offset);
	if (bytes > (*pbh)->b_size - *offset) {
		blocknr = (*pbh)->b_blocknr;
		brelse(*pbh);
		*pbh = __bread(nilfs->ns_bdev, blocknr + 1,
			       nilfs->ns_blocksize);
		if (unlikely(!*pbh))
			return NULL;
		*offset = 0;
	}
	ptr = (*pbh)->b_data + *offset;
	*offset += bytes;
	return ptr;
}

/**
 * nilfs_skip_summary_info - skip items on summary blocks of a log
 * @nilfs: nilfs object
 * @pbh: the current buffer head on summary blocks [in, out]
 * @offset: the current byte offset on summary blocks [in, out]
 * @bytes: byte size of the item to be skipped
 * @count: number of items to be skipped
 */
static void nilfs_skip_summary_info(struct the_nilfs *nilfs,
				    struct buffer_head **pbh,
				    unsigned int *offset, unsigned int bytes,
				    unsigned long count)
{
	unsigned int rest_item_in_current_block
		= ((*pbh)->b_size - *offset) / bytes;

	if (count <= rest_item_in_current_block) {
		*offset += bytes * count;
	} else {
		sector_t blocknr = (*pbh)->b_blocknr;
		unsigned int nitem_per_block = (*pbh)->b_size / bytes;
		unsigned int bcnt;

		count -= rest_item_in_current_block;
		bcnt = DIV_ROUND_UP(count, nitem_per_block);
		*offset = bytes * (count - (bcnt - 1) * nitem_per_block);
		brelse(*pbh);
		*pbh = __bread(nilfs->ns_bdev, blocknr + bcnt,
			       nilfs->ns_blocksize);
	}
}

/**
 * nilfs_scan_dsync_log - get block information of a log written for data sync
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: log summary information
 * @head: list head to add nilfs_recovery_block struct
 */
static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
				struct nilfs_segment_summary *sum,
				struct list_head *head)
{
	struct buffer_head *bh;
	unsigned int offset;
	u32 nfinfo, sumbytes;
	sector_t blocknr;
	ino_t ino;
	int err = -EIO;

	nfinfo = le32_to_cpu(sum->ss_nfinfo);
	if (!nfinfo)
		return 0;

	sumbytes = le32_to_cpu(sum->ss_sumbytes);
	blocknr = start_blocknr + DIV_ROUND_UP(sumbytes, nilfs->ns_blocksize);
	bh = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
	if (unlikely(!bh))
		goto out;

	offset = le16_to_cpu(sum->ss_bytes);
	for (;;) {
		unsigned long nblocks, ndatablk, nnodeblk;
		struct nilfs_finfo *finfo;

		finfo = nilfs_read_summary_info(nilfs, &bh, &offset,
						sizeof(*finfo));
		if (unlikely(!finfo))
			goto out;

		ino = le64_to_cpu(finfo->fi_ino);
		nblocks = le32_to_cpu(finfo->fi_nblocks);
		ndatablk = le32_to_cpu(finfo->fi_ndatablk);
		nnodeblk = nblocks - ndatablk;

		while (ndatablk-- > 0) {
			struct nilfs_recovery_block *rb;
			struct nilfs_binfo_v *binfo;

			binfo = nilfs_read_summary_info(nilfs, &bh, &offset,
							sizeof(*binfo));
			if (unlikely(!binfo))
				goto out;

			rb = kmalloc(sizeof(*rb), GFP_NOFS);
			if (unlikely(!rb)) {
				err = -ENOMEM;
				goto out;
			}
			rb->ino = ino;
			rb->blocknr = blocknr++;
			rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
			rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
			/* INIT_LIST_HEAD(&rb->list); */
			list_add_tail(&rb->list, head);
		}
		if (--nfinfo == 0)
			break;
		blocknr += nnodeblk; /* always 0 for data sync logs */
		nilfs_skip_summary_info(nilfs, &bh, &offset, sizeof(__le64),
					nnodeblk);
		if (unlikely(!bh))
			goto out;
	}
	err = 0;
 out:
	brelse(bh); /* brelse(NULL) is just ignored */
	return err;
}
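
/* Free all nilfs_recovery_block entries queued on @head */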
static void dispose_recovery_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nilfs_recovery_block *rb;

		rb = list_first_entry(head, struct nilfs_recovery_block, list);
		list_del(&rb->list);
		kfree(rb);
	}
}
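
/*
 * nilfs_segment_entry - list node remembering a segment number visited
 * during the scan; nilfs_segment_list_add() appends one entry to @head.
 */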
struct nilfs_segment_entry {
	struct list_head list;
	__u64 segnum;
};

static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
{
	struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS);

	if (unlikely(!ent))
		return -ENOMEM;

	ent->segnum = segnum;
	INIT_LIST_HEAD(&ent->list);
	list_add_tail(&ent->list, head);
	return 0;
}
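
/* Free all nilfs_segment_entry items queued on @head */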
void nilfs_dispose_segment_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nilfs_segment_entry *ent;

		ent = list_first_entry(head, struct nilfs_segment_entry, list);
		list_del(&ent->list);
		kfree(ent);
	}
}
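
/*
 * nilfs_prepare_segment_for_recovery - prepare segment usage for the
 * recovery write: free the next segment of the latest super root, mark
 * the segments written after it dirty so they are not reallocated by the
 * next write, and allocate a fresh segment for the recovered checkpoint.
 */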
static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
					      struct super_block *sb,
					      struct nilfs_recovery_info *ri)
{
	struct list_head *head = &ri->ri_used_segments;
	struct nilfs_segment_entry *ent, *n;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 segnum[4];
	int err;
	int i;

	segnum[0] = nilfs->ns_segnum;
	segnum[1] = nilfs->ns_nextnum;
	segnum[2] = ri->ri_segnum;
	segnum[3] = ri->ri_nextnum;

	/*
	 * Releasing the next segment of the latest super root.
	 * The next segment is invalidated by this recovery.
	 */
	err = nilfs_sufile_free(sufile, segnum[1]);
	if (unlikely(err))
		goto failed;

	for (i = 1; i < 4; i++) {
		err = nilfs_segment_list_add(head, segnum[i]);
		if (unlikely(err))
			goto failed;
	}

	/*
	 * Collecting segments written after the latest super root.
	 * These are marked dirty to avoid being reallocated in the next write.
	 */
	list_for_each_entry_safe(ent, n, head, list) {
		if (ent->segnum != segnum[0]) {
			err = nilfs_sufile_scrap(sufile, ent->segnum);
			if (unlikely(err))
				goto failed;
		}
		list_del(&ent->list);
		kfree(ent);
	}

	/* Allocate new segments for recovery */
	err = nilfs_sufile_alloc(sufile, &segnum[0]);
	if (unlikely(err))
		goto failed;

	nilfs->ns_pseg_offset = 0;
	nilfs->ns_seg_seq = ri->ri_seq + 2;
	nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];

 failed:
	/* No need to recover sufile because it will be destroyed on error */
	return err;
}
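
/*
 * nilfs_recovery_copy_block - read the block described by @rb from disk
 * and copy its contents into the corresponding position of @page.
 */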
static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
				     struct nilfs_recovery_block *rb,
				     struct page *page)
{
	struct buffer_head *bh_org;
	void *kaddr;

	bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
	if (unlikely(!bh_org))
		return -EIO;

	kaddr = kmap_atomic(page);
	memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
	kunmap_atomic(kaddr);
	brelse(bh_org);
	return 0;
}
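
/*
 * nilfs_recover_dsync_blocks - reinsert the data blocks listed on @head
 * into their files through the page cache and mark them dirty so that
 * the following segment construction writes them back.
 */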
static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
				      struct super_block *sb,
				      struct nilfs_root *root,
				      struct list_head *head,
				      unsigned long *nr_salvaged_blocks)
{
	struct inode *inode;
	struct nilfs_recovery_block *rb, *n;
	unsigned int blocksize = nilfs->ns_blocksize;
	struct page *page;
	loff_t pos;
	int err = 0, err2 = 0;

	list_for_each_entry_safe(rb, n, head, list) {
		inode = nilfs_iget(sb, root, rb->ino);
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			inode = NULL;
			goto failed_inode;
		}

		pos = rb->blkoff << inode->i_blkbits;
		err = block_write_begin(inode->i_mapping, pos, blocksize,
					0, &page, nilfs_get_block);
		if (unlikely(err)) {
			loff_t isize = inode->i_size;

			if (pos + blocksize > isize)
				nilfs_write_failed(inode->i_mapping,
						   pos + blocksize);
			goto failed_inode;
		}

		err = nilfs_recovery_copy_block(nilfs, rb, page);
		if (unlikely(err))
			goto failed_page;

		err = nilfs_set_file_dirty(inode, 1);
		if (unlikely(err))
			goto failed_page;

		block_write_end(NULL, inode->i_mapping, pos, blocksize,
				blocksize, page, NULL);

		unlock_page(page);
		put_page(page);

		(*nr_salvaged_blocks)++;
		goto next;

 failed_page:
		unlock_page(page);
		put_page(page);

 failed_inode:
		printk(KERN_WARNING
		       "NILFS warning: error recovering data block "
		       "(err=%d, ino=%lu, block-offset=%llu)\n",
		       err, (unsigned long)rb->ino,
		       (unsigned long long)rb->blkoff);
		if (!err2)
			err2 = err;
 next:
		iput(inode); /* iput(NULL) is just ignored */
		list_del_init(&rb->list);
		kfree(rb);
	}
	return err2;
}

/**
 * nilfs_do_roll_forward - salvage logical segments newer than the latest
 * checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @root: NILFS root object of the latest checkpoint
 * @ri: pointer to a nilfs_recovery_info
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
				 struct super_block *sb,
				 struct nilfs_root *root,
				 struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh_sum = NULL;
	struct nilfs_segment_summary *sum = NULL;
	sector_t pseg_start;
	sector_t seg_start, seg_end;  /* Starting/ending DBN of full segment */
	unsigned long nsalvaged_blocks = 0;
	unsigned int flags;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	int empty_seg = 0;
	int err = 0, ret;
	LIST_HEAD(dsync_blocks);  /* list of data blocks to be recovered */
	enum {
		RF_INIT_ST,
		RF_DSYNC_ST,   /* scanning data-sync segments */
	};
	int state = RF_INIT_ST;

	pseg_start = ri->ri_lsegs_start;
	seg_seq = ri->ri_lsegs_start_seq;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
		brelse(bh_sum);
		bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
		if (!bh_sum) {
			err = -EIO;
			goto failed;
		}

		ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO) {
				err = -EIO;
				goto failed;
			}
			goto strayed;
		}

		flags = le16_to_cpu(sum->ss_flags);
		if (flags & NILFS_SS_SR)
			goto confused;

		/* Found a valid partial segment; do recovery actions */
		nextnum = nilfs_get_segnum_of_block(nilfs,
						    le64_to_cpu(sum->ss_next));
		empty_seg = 0;
		nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
		if (!(flags & NILFS_SS_GC))
			nilfs->ns_nongc_ctime = nilfs->ns_ctime;

		switch (state) {
		case RF_INIT_ST:
			if (!(flags & NILFS_SS_LOGBGN) ||
			    !(flags & NILFS_SS_SYNDT))
				goto try_next_pseg;
			state = RF_DSYNC_ST;
			/* Fall through */
		case RF_DSYNC_ST:
			if (!(flags & NILFS_SS_SYNDT))
				goto confused;

			err = nilfs_scan_dsync_log(nilfs, pseg_start, sum,
						   &dsync_blocks);
			if (unlikely(err))
				goto failed;
			if (flags & NILFS_SS_LOGEND) {
				err = nilfs_recover_dsync_blocks(
					nilfs, sb, root, &dsync_blocks,
					&nsalvaged_blocks);
				if (unlikely(err))
					goto failed;
				state = RF_INIT_ST;
			}
			break; /* Fall through to try_next_pseg */
		}

 try_next_pseg:
		if (pseg_start == ri->ri_lsegs_end)
			break;
		pseg_start += le32_to_cpu(sum->ss_nblocks);
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		if (pseg_start == ri->ri_lsegs_end)
			break;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			break;
		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

	if (nsalvaged_blocks) {
		printk(KERN_INFO "NILFS (device %s): salvaged %lu blocks\n",
		       sb->s_id, nsalvaged_blocks);
		ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
	}
 out:
	brelse(bh_sum);
	dispose_recovery_list(&dsync_blocks);
	return err;

 confused:
	err = -EINVAL;
 failed:
	printk(KERN_ERR
	       "NILFS (device %s): Error roll-forwarding "
	       "(err=%d, pseg block=%llu). ",
	       sb->s_id, err, (unsigned long long)pseg_start);
	goto out;
}
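
/*
 * nilfs_finish_roll_forward - zero out the summary block heading the
 * salvaged log chain, but only when it lies in the same segment as the
 * super root, so that the already-recovered logs are not replayed again
 * on a later mount.
 */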
static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
				      struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh;
	int err;

	if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
	    nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
		return;

	bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize);
	BUG_ON(!bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_dirty(bh);
	err = sync_dirty_buffer(bh);
	if (unlikely(err))
		printk(KERN_WARNING
		       "NILFS warning: buffer sync write failed during "
		       "post-cleaning of recovery.\n");
	brelse(bh);
}

/**
 * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @ri: pointer to a nilfs_recovery_info struct to store search results
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - Inconsistent filesystem state.
 *
 * %-EIO - I/O error
 *
 * %-ENOSPC - No space left on device (only in a panic state).
 *
 * %-ERESTARTSYS - Interrupted.
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
			      struct super_block *sb,
			      struct nilfs_recovery_info *ri)
{
	struct nilfs_root *root;
	int err;

	if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
		return 0;

	err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root);
	if (unlikely(err)) {
		printk(KERN_ERR
		       "NILFS: error loading the latest checkpoint.\n");
		return err;
	}

	err = nilfs_do_roll_forward(nilfs, sb, root, ri);
	if (unlikely(err))
		goto failed;

	if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
		err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri);
		if (unlikely(err)) {
			printk(KERN_ERR "NILFS: Error preparing segments for "
			       "recovery.\n");
			goto failed;
		}

		err = nilfs_attach_log_writer(sb, root);
		if (unlikely(err))
			goto failed;

		set_nilfs_discontinued(nilfs);
		err = nilfs_construct_segment(sb);
		nilfs_detach_log_writer(sb);

		if (unlikely(err)) {
			printk(KERN_ERR "NILFS: Oops! recovery failed. "
			       "(err=%d)\n", err);
			goto failed;
		}

		nilfs_finish_roll_forward(nilfs, ri);
	}

 failed:
	nilfs_put_root(root);
	return err;
}

/**
 * nilfs_search_super_root - search the latest valid super root
 * @nilfs: the_nilfs
 * @ri: pointer to a nilfs_recovery_info struct to store search results
 *
 * nilfs_search_super_root() looks for the latest super root from the partial
 * segment pointed to by the superblock.  It sets up struct the_nilfs through
 * this search and fills the nilfs_recovery_info (@ri) required for recovery.
 *
 * Return Value: On success, 0 is returned.  On error, one of the following
 * negative error codes is returned.
 *
 * %-EINVAL - No valid segment found
 *
 * %-EIO - I/O error
 *
 * %-ENOMEM - Insufficient memory available.
 */
int nilfs_search_super_root(struct the_nilfs *nilfs,
			    struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh_sum = NULL;
	struct nilfs_segment_summary *sum = NULL;
	sector_t pseg_start, pseg_end, sr_pseg_start = 0;
	sector_t seg_start, seg_end;  /* range of full segment (block number) */
	sector_t b, end;
	unsigned long nblocks;
	unsigned int flags;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	__u64 cno;
	LIST_HEAD(segments);
	int empty_seg = 0, scan_newer = 0;
	int ret;

	pseg_start = nilfs->ns_last_pseg;
	seg_seq = nilfs->ns_last_seq;
	cno = nilfs->ns_last_cno;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);

	/* Calculate range of segment */
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	/* Read ahead segment */
	b = seg_start;
	while (b <= seg_end)
		__breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize);

	for (;;) {
		brelse(bh_sum);
		ret = NILFS_SEG_FAIL_IO;
		bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
		if (!bh_sum)
			goto failed;

		ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO)
				goto failed;
			goto strayed;
		}

		nblocks = le32_to_cpu(sum->ss_nblocks);
		pseg_end = pseg_start + nblocks - 1;
		if (unlikely(pseg_end > seg_end)) {
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto strayed;
		}

		/* A valid partial segment */
		ri->ri_pseg_start = pseg_start;
		ri->ri_seq = seg_seq;
		ri->ri_segnum = segnum;
		nextnum = nilfs_get_segnum_of_block(nilfs,
						    le64_to_cpu(sum->ss_next));
		ri->ri_nextnum = nextnum;
		empty_seg = 0;

		flags = le16_to_cpu(sum->ss_flags);
		if (!(flags & NILFS_SS_SR) && !scan_newer) {
			/*
			 * This will never happen because a superblock
			 * (last_segment) always points to a pseg with
			 * a super root.
			 */
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto failed;
		}

		if (pseg_start == seg_start) {
			nilfs_get_segment_range(nilfs, nextnum, &b, &end);
			while (b <= end)
				__breadahead(nilfs->ns_bdev, b++,
					     nilfs->ns_blocksize);
		}
		if (!(flags & NILFS_SS_SR)) {
			if (!ri->ri_lsegs_start && (flags & NILFS_SS_LOGBGN)) {
				ri->ri_lsegs_start = pseg_start;
				ri->ri_lsegs_start_seq = seg_seq;
			}
			if (flags & NILFS_SS_LOGEND)
				ri->ri_lsegs_end = pseg_start;
			goto try_next_pseg;
		}

		/* A valid super root was found. */
		ri->ri_cno = cno++;
		ri->ri_super_root = pseg_end;
		ri->ri_lsegs_start = ri->ri_lsegs_end = 0;

		nilfs_dispose_segment_list(&segments);
		sr_pseg_start = pseg_start;
		nilfs->ns_pseg_offset = pseg_start + nblocks - seg_start;
		nilfs->ns_seg_seq = seg_seq;
		nilfs->ns_segnum = segnum;
		nilfs->ns_cno = cno;  /* nilfs->ns_cno = ri->ri_cno + 1 */
		nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
		nilfs->ns_nextnum = nextnum;

		if (scan_newer)
			ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
		else {
			if (nilfs->ns_mount_state & NILFS_VALID_FS)
				goto super_root_found;
			scan_newer = 1;
		}

 try_next_pseg:
		/* Standing on a course, or met an inconsistent state */
		pseg_start += nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		/* Off the trail */
		if (!scan_newer)
			/*
			 * This can happen if a checkpoint was written without
			 * barriers, or as a result of an I/O failure.
			 */
			goto failed;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			goto super_root_found; /* found a valid super root */

		ret = nilfs_segment_list_add(&segments, segnum);
		if (unlikely(ret))
			goto failed;

		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

 super_root_found:
	/* Updating pointers relating to the latest checkpoint */
	brelse(bh_sum);
	list_splice_tail(&segments, &ri->ri_used_segments);
	nilfs->ns_last_pseg = sr_pseg_start;
	nilfs->ns_last_seq = nilfs->ns_seg_seq;
	nilfs->ns_last_cno = ri->ri_cno;
	return 0;

 failed:
	brelse(bh_sum);
	nilfs_dispose_segment_list(&segments);
	return (ret < 0) ? ret : nilfs_warn_segment_error(ret);
}