  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Copyright (C) 2017 Oracle. All Rights Reserved.
  4. * Author: Darrick J. Wong <darrick.wong@oracle.com>
  5. */
  6. #include "xfs.h"
  7. #include "xfs_fs.h"
  8. #include "xfs_shared.h"
  9. #include "xfs_format.h"
  10. #include "xfs_trans_resv.h"
  11. #include "xfs_mount.h"
  12. #include "xfs_defer.h"
  13. #include "xfs_btree.h"
  14. #include "xfs_bit.h"
  15. #include "xfs_log_format.h"
  16. #include "xfs_trans.h"
  17. #include "xfs_sb.h"
  18. #include "xfs_alloc.h"
  19. #include "xfs_rmap.h"
  20. #include "xfs_refcount.h"
  21. #include "scrub/xfs_scrub.h"
  22. #include "scrub/scrub.h"
  23. #include "scrub/common.h"
  24. #include "scrub/btree.h"
  25. #include "scrub/trace.h"
  26. /*
  27. * Set us up to scrub reference count btrees.
  28. */
  29. int
  30. xfs_scrub_setup_ag_refcountbt(
  31. struct xfs_scrub_context *sc,
  32. struct xfs_inode *ip)
  33. {
  34. return xfs_scrub_setup_ag_btree(sc, ip, false);
  35. }
  36. /* Reference count btree scrubber. */
  37. /*
  38. * Confirming Reference Counts via Reverse Mappings
  39. *
  40. * We want to count the reverse mappings overlapping a refcount record
  41. * (bno, len, refcount), allowing for the possibility that some of the
  42. * overlap may come from smaller adjoining reverse mappings, while some
  43. * comes from single extents which overlap the range entirely. The
  44. * outer loop is as follows:
  45. *
  46. * 1. For all reverse mappings overlapping the refcount extent,
  47. * a. If a given rmap completely overlaps, mark it as seen.
  48. * b. Otherwise, record the fragment (in agbno order) for later
  49. * processing.
  50. *
  51. * Once we've seen all the rmaps, we know that for all blocks in the
  52. * refcount record we want to find $refcount owners and we've already
  53. * visited $seen extents that overlap all the blocks. Therefore, we
  54. * need to find ($refcount - $seen) owners for every block in the
  55. * extent; call that quantity $target_nr. Proceed as follows:
  56. *
  57. * 2. Pull the first $target_nr fragments from the list; all of them
  58. * should start at or before the start of the extent.
  59. * Call this subset of fragments the working set.
  60. * 3. Until there are no more unprocessed fragments,
  61. * a. Find the shortest fragments in the set and remove them.
  62. * b. Note the block number of the end of these fragments.
  63. * c. Pull the same number of fragments from the list. All of these
  64. * fragments should start at the block number recorded in the
  65. * previous step.
  66. * d. Put those fragments in the set.
  67. * 4. Check that there are $target_nr fragments remaining in the list,
  68. * and that they all end at or beyond the end of the refcount extent.
  69. *
  70. * If the refcount is correct, all the check conditions in the algorithm
  71. * should always hold true. If not, the refcount is incorrect.
  72. */
/*
 * An rmap fragment: a reverse mapping that only partially overlaps the
 * refcount extent being checked.  Fragments are collected on a list (in
 * agbno order) and resolved later by
 * xfs_scrub_refcountbt_process_rmap_fragments.
 */
struct xfs_scrub_refcnt_frag {
	struct list_head		list;	/* link in fragments/worklist */
	struct xfs_rmap_irec		rm;	/* copy of the rmap record */
};
/* State for cross-referencing one refcount record against the rmapbt. */
struct xfs_scrub_refcnt_check {
	struct xfs_scrub_context	*sc;		/* scrub context */
	struct list_head		fragments;	/* unresolved rmap fragments */

	/* refcount extent we're examining */
	xfs_agblock_t			bno;		/* extent start, in AG blocks */
	xfs_extlen_t			len;		/* extent length */
	xfs_nlink_t			refcount;	/* refcount claimed by the record */

	/* number of owners seen */
	xfs_nlink_t			seen;
};
/*
 * Decide if the given rmap is large enough that we can redeem it
 * towards refcount verification now, or if it's a fragment, in
 * which case we'll hang onto it in the hopes that we'll later
 * discover that we've collected exactly the correct number of
 * fragments as the refcountbt says we should have.
 *
 * Called back from xfs_rmap_query_range for each rmap overlapping the
 * refcount extent in @priv (a struct xfs_scrub_refcnt_check).
 * Returns 0 or a negative errno (-ENOMEM on allocation failure).
 */
STATIC int
xfs_scrub_refcountbt_rmap_check(
	struct xfs_btree_cur		*cur,
	struct xfs_rmap_irec		*rec,
	void				*priv)
{
	struct xfs_scrub_refcnt_check	*refchk = priv;
	struct xfs_scrub_refcnt_frag	*frag;
	xfs_agblock_t			rm_last;
	xfs_agblock_t			rc_last;
	int				error = 0;

	if (xfs_scrub_should_terminate(refchk->sc, &error))
		return error;

	/* Last blocks covered by the rmap and by the refcount extent. */
	rm_last = rec->rm_startblock + rec->rm_blockcount - 1;
	rc_last = refchk->bno + refchk->len - 1;

	/* Confirm that a single-owner refc extent is a CoW stage. */
	if (refchk->refcount == 1 && rec->rm_owner != XFS_RMAP_OWN_COW) {
		xfs_scrub_btree_xref_set_corrupt(refchk->sc, cur, 0);
		return 0;
	}

	if (rec->rm_startblock <= refchk->bno && rm_last >= rc_last) {
		/*
		 * The rmap overlaps the refcount record, so we can confirm
		 * one refcount owner seen.
		 */
		refchk->seen++;
	} else {
		/*
		 * This rmap covers only part of the refcount record, so
		 * save the fragment for later processing.  If the rmapbt
		 * is healthy each rmap_irec we see will be in agbno order
		 * so we don't need insertion sort here.
		 */
		frag = kmem_alloc(sizeof(struct xfs_scrub_refcnt_frag),
				KM_MAYFAIL);
		if (!frag)
			return -ENOMEM;
		memcpy(&frag->rm, rec, sizeof(frag->rm));
		list_add_tail(&frag->list, &refchk->fragments);
	}

	return 0;
}
/*
 * Given a bunch of rmap fragments, iterate through them, keeping
 * a running tally of the refcount.  If this ever deviates from
 * what we expect (which is the refcountbt's refcount minus the
 * number of extents that totally covered the refcountbt extent),
 * we have a refcountbt error.
 *
 * On success, refchk->seen is raised to refchk->refcount; on any
 * inconsistency it is left alone so the caller can flag corruption.
 * All fragments (and the internal worklist) are freed either way.
 */
STATIC void
xfs_scrub_refcountbt_process_rmap_fragments(
	struct xfs_scrub_refcnt_check	*refchk)
{
	struct list_head		worklist;
	struct xfs_scrub_refcnt_frag	*frag;
	struct xfs_scrub_refcnt_frag	*n;
	xfs_agblock_t			bno;
	xfs_agblock_t			rbno;
	xfs_agblock_t			next_rbno;
	xfs_nlink_t			nr;
	xfs_nlink_t			target_nr;

	target_nr = refchk->refcount - refchk->seen;
	if (target_nr == 0)
		return;

	/*
	 * There are (refchk->refcount - refchk->seen) references we haven't
	 * found yet.  Pull that many off the fragment list and figure out
	 * where the smallest rmap ends (and therefore the next rmap should
	 * start).  All the rmaps we pull off should start at or before the
	 * beginning of the refcount record's range.
	 */
	INIT_LIST_HEAD(&worklist);
	rbno = NULLAGBLOCK;
	nr = 1;

	/* Make sure the fragments actually /are/ in agbno order. */
	bno = 0;
	list_for_each_entry(frag, &refchk->fragments, list) {
		if (frag->rm.rm_startblock < bno)
			goto done;
		bno = frag->rm.rm_startblock;
	}

	/*
	 * Find all the rmaps that start at or before the refc extent,
	 * and put them on the worklist.
	 */
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		if (frag->rm.rm_startblock > refchk->bno)
			goto done;
		/* Track the smallest fragment end block in rbno. */
		bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
		if (bno < rbno)
			rbno = bno;
		list_move_tail(&frag->list, &worklist);
		if (nr == target_nr)
			break;
		nr++;
	}

	/*
	 * We should have found exactly $target_nr rmap fragments starting
	 * at or before the refcount extent.
	 */
	if (nr != target_nr)
		goto done;

	while (!list_empty(&refchk->fragments)) {
		/* Discard any fragments ending at rbno from the worklist. */
		nr = 0;
		next_rbno = NULLAGBLOCK;
		list_for_each_entry_safe(frag, n, &worklist, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			if (bno != rbno) {
				/* Still-live fragment; note its end block. */
				if (bno < next_rbno)
					next_rbno = bno;
				continue;
			}
			list_del(&frag->list);
			kmem_free(frag);
			nr++;
		}

		/* Try to add nr rmaps starting at rbno to the worklist. */
		list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
			bno = frag->rm.rm_startblock + frag->rm.rm_blockcount;
			/* A gap in coverage means the refcount is wrong. */
			if (frag->rm.rm_startblock != rbno)
				goto done;
			list_move_tail(&frag->list, &worklist);
			if (next_rbno > bno)
				next_rbno = bno;
			nr--;
			if (nr == 0)
				break;
		}

		/*
		 * If we get here and nr > 0, this means that we added fewer
		 * items to the worklist than we discarded because the fragment
		 * list ran out of items.  Therefore, we cannot maintain the
		 * required refcount.  Something is wrong, so we're done.
		 */
		if (nr)
			goto done;

		rbno = next_rbno;
	}

	/*
	 * Make sure the last extent we processed ends at or beyond
	 * the end of the refcount extent.
	 */
	if (rbno < refchk->bno + refchk->len)
		goto done;

	/* Actually record us having seen the remaining refcount. */
	refchk->seen = refchk->refcount;

done:
	/* Delete fragments and work list. */
	list_for_each_entry_safe(frag, n, &worklist, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}
/*
 * Use the rmap entries covering this extent to verify the refcount.
 *
 * Queries the rmapbt for all mappings overlapping [bno, bno + len - 1],
 * counting whole-extent owners directly and deferring partial overlaps
 * to xfs_scrub_refcountbt_process_rmap_fragments.  If the total number
 * of owners seen disagrees with @refcount, flag a cross-referencing
 * corruption.  Silently returns if there is no rmap cursor or xref
 * checking is being skipped.
 */
STATIC void
xfs_scrub_refcountbt_xref_rmap(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			bno,
	xfs_extlen_t			len,
	xfs_nlink_t			refcount)
{
	struct xfs_scrub_refcnt_check	refchk = {
		.sc = sc,
		.bno = bno,
		.len = len,
		.refcount = refcount,
		.seen = 0,
	};
	struct xfs_rmap_irec		low;
	struct xfs_rmap_irec		high;
	struct xfs_scrub_refcnt_frag	*frag;
	struct xfs_scrub_refcnt_frag	*n;
	int				error;

	if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
		return;

	/* Cross-reference with the rmapbt to confirm the refcount. */
	memset(&low, 0, sizeof(low));
	low.rm_startblock = bno;
	/* 0xFF fill makes high the maximal key for every other field. */
	memset(&high, 0xFF, sizeof(high));
	high.rm_startblock = bno + len - 1;

	INIT_LIST_HEAD(&refchk.fragments);
	error = xfs_rmap_query_range(sc->sa.rmap_cur, &low, &high,
			&xfs_scrub_refcountbt_rmap_check, &refchk);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		goto out_free;

	xfs_scrub_refcountbt_process_rmap_fragments(&refchk);
	if (refcount != refchk.seen)
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

out_free:
	/* Free any fragments the query callback collected. */
	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
		list_del(&frag->list);
		kmem_free(frag);
	}
}
  294. /* Cross-reference with the other btrees. */
  295. STATIC void
  296. xfs_scrub_refcountbt_xref(
  297. struct xfs_scrub_context *sc,
  298. xfs_agblock_t agbno,
  299. xfs_extlen_t len,
  300. xfs_nlink_t refcount)
  301. {
  302. if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
  303. return;
  304. xfs_scrub_xref_is_used_space(sc, agbno, len);
  305. xfs_scrub_xref_is_not_inode_chunk(sc, agbno, len);
  306. xfs_scrub_refcountbt_xref_rmap(sc, agbno, len, refcount);
  307. }
/*
 * Scrub a refcountbt record.
 *
 * Called by the generic btree scrubber for each record; bs->private is
 * a running tally of CoW staging blocks (set up in xfs_scrub_refcountbt).
 * Flags btree corruption for flag/refcount mismatches, out-of-bounds or
 * overflowing extents, and zero refcounts, then cross-references the
 * extent with the other AG btrees.
 */
STATIC int
xfs_scrub_refcountbt_rec(
	struct xfs_scrub_btree		*bs,
	union xfs_btree_rec		*rec)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_agblock_t			*cow_blocks = bs->private;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			bno;
	xfs_extlen_t			len;
	xfs_nlink_t			refcount;
	bool				has_cowflag;
	int				error = 0;

	bno = be32_to_cpu(rec->refc.rc_startblock);
	len = be32_to_cpu(rec->refc.rc_blockcount);
	refcount = be32_to_cpu(rec->refc.rc_refcount);

	/* Only CoW records can have refcount == 1. */
	has_cowflag = (bno & XFS_REFC_COW_START);
	if ((refcount == 1 && !has_cowflag) || (refcount != 1 && has_cowflag))
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);
	if (has_cowflag)
		(*cow_blocks) += len;

	/* Check the extent. */
	bno &= ~XFS_REFC_COW_START;
	/* bno + len <= bno catches zero length and unsigned overflow. */
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	if (refcount == 0)
		xfs_scrub_btree_set_corrupt(bs->sc, bs->cur, 0);

	xfs_scrub_refcountbt_xref(bs->sc, bno, len, refcount);

	return error;
}
/*
 * Make sure we have as many refc blocks as the rmap says.
 *
 * @oinfo must describe XFS_RMAP_OWN_REFC on entry (see caller); note
 * that this function overwrites it with XFS_RMAP_OWN_COW for the
 * second check.  @cow_blocks is the CoW staging block count tallied
 * while scrubbing the refcountbt records.
 */
STATIC void
xfs_scrub_refcount_xref_rmap(
	struct xfs_scrub_context	*sc,
	struct xfs_owner_info		*oinfo,
	xfs_filblks_t			cow_blocks)
{
	xfs_extlen_t			refcbt_blocks = 0;
	xfs_filblks_t			blocks;
	int				error;

	if (!sc->sa.rmap_cur || xfs_scrub_skip_xref(sc->sm))
		return;

	/* Check that we saw as many refcbt blocks as the rmap knows about. */
	error = xfs_btree_count_blocks(sc->sa.refc_cur, &refcbt_blocks);
	if (!xfs_scrub_btree_process_error(sc, sc->sa.refc_cur, 0, &error))
		return;
	error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
			&blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != refcbt_blocks)
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);

	/* Check that we saw as many cow blocks as the rmap knows about. */
	xfs_rmap_ag_owner(oinfo, XFS_RMAP_OWN_COW);
	error = xfs_scrub_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur, oinfo,
			&blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
		return;
	if (blocks != cow_blocks)
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.rmap_cur, 0);
}
  373. /* Scrub the refcount btree for some AG. */
  374. int
  375. xfs_scrub_refcountbt(
  376. struct xfs_scrub_context *sc)
  377. {
  378. struct xfs_owner_info oinfo;
  379. xfs_agblock_t cow_blocks = 0;
  380. int error;
  381. xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_REFC);
  382. error = xfs_scrub_btree(sc, sc->sa.refc_cur, xfs_scrub_refcountbt_rec,
  383. &oinfo, &cow_blocks);
  384. if (error)
  385. return error;
  386. xfs_scrub_refcount_xref_rmap(sc, &oinfo, cow_blocks);
  387. return 0;
  388. }
/*
 * xref check that a cow staging extent is marked in the refcountbt.
 *
 * Looks up the refcount record at or before agbno (offset by the CoW
 * namespace bit) and flags corruption unless a record is found with the
 * CoW flag set, a refcount of exactly 1, and a length covering at least
 * @len blocks.  Silently returns if there is no refcount cursor or xref
 * checking is being skipped.
 */
void
xfs_scrub_xref_is_cow_staging(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	struct xfs_refcount_irec	rc;
	bool				has_cowflag;
	int				has_refcount;
	int				error;

	if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm))
		return;

	/* Find the CoW staging extent. */
	error = xfs_refcount_lookup_le(sc->sa.refc_cur,
			agbno + XFS_REFC_COW_START, &has_refcount);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	error = xfs_refcount_get_rec(sc->sa.refc_cur, &rc, &has_refcount);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (!has_refcount) {
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
		return;
	}

	/* CoW flag must be set, refcount must be 1. */
	has_cowflag = (rc.rc_startblock & XFS_REFC_COW_START);
	if (!has_cowflag || rc.rc_refcount != 1)
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);

	/* Must be at least as long as what was passed in */
	if (rc.rc_blockcount < len)
		xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
}
  426. /*
  427. * xref check that the extent is not shared. Only file data blocks
  428. * can have multiple owners.
  429. */
  430. void
  431. xfs_scrub_xref_is_not_shared(
  432. struct xfs_scrub_context *sc,
  433. xfs_agblock_t agbno,
  434. xfs_extlen_t len)
  435. {
  436. bool shared;
  437. int error;
  438. if (!sc->sa.refc_cur || xfs_scrub_skip_xref(sc->sm))
  439. return;
  440. error = xfs_refcount_has_record(sc->sa.refc_cur, agbno, len, &shared);
  441. if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
  442. return;
  443. if (shared)
  444. xfs_scrub_btree_xref_set_corrupt(sc, sc->sa.refc_cur, 0);
  445. }