/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_superblock_xref(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agnumber_t			agno = sc->sm->sm_agno;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xfs_scrub_ag_init(sc, agno, &sc->sa);
	if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock. Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xfs_scrub_superblock(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*bp;
	struct xfs_dsb			*sb;
	xfs_agnumber_t			agno;
	uint32_t			v2_ok;
	__be32				features_mask;
	int				error;
	__be16				vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_sb_read_secondary(mp, sc->tp, agno, &bp);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right. For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect. Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
	default:
		break;
	}
	if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = XFS_BUF_TO_SBP(bp);

	/*
	 * Verify the geometries match. Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xfs_scrub_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xfs_scrub_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xfs_scrub_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xfs_scrub_block_set_corrupt(sc, bp);
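
		/*
		 * sb_bad_features2 is supposed to mirror sb_features2, so a
		 * mismatch between the two only needs preening.
		 */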
		if (sb->sb_features2 != sb->sb_bad_features2)
			xfs_scrub_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xfs_scrub_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xfs_scrub_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xfs_scrub_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xfs_scrub_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xfs_scrub_block_set_corrupt(sc, bp);

	xfs_scrub_superblock_xref(sc, bp);

	return error;
}

/* AGF */

/* Tally freespace record lengths. */
STATIC int
xfs_scrub_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}

/* Check agf_freeblks */
static inline void
xfs_scrub_agf_xref_freeblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_extlen_t			blocks = 0;
	int				error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xfs_scrub_agf_record_bno_lengths, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xfs_scrub_agf_xref_cntbt(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t			agbno;
	xfs_extlen_t			blocks;
	int				have;
	int				error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest */
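	/*
	 * The _lookup_le call above left the cursor at the largest record in
	 * the by-size (cnt) btree, so its length should match agf_longest.
	 */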
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xfs_scrub_agf_xref_btreeblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			blocks;
	xfs_agblock_t			btreeblks;
	int				error;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
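		/*
		 * agf_btreeblks does not count a btree's root block, so
		 * subtract one here (and for the bnobt/cntbt below).
		 */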
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Check agf_refcount_blocks against tree size */
static inline void
xfs_scrub_agf_xref_refcblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t			blocks;
	int				error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agf_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_agf_xref_freeblks(sc);
	xfs_scrub_agf_xref_cntbt(sc);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_agf_xref_btreeblks(sc);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);
	xfs_scrub_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGF. */
int
xfs_scrub_agf(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_agf			*agf;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			eoag;
	xfs_agblock_t			agfl_first;
	xfs_agblock_t			agfl_last;
	xfs_agblock_t			agfl_count;
	xfs_agblock_t			fl_count;
	int				level;
	int				error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xfs_scrub_buffer_recheck(sc, sc->sa.agf_bp);

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
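	/*
	 * The AGFL is a circular array, so the "last" index can wrap back
	 * around below the "first" index.
	 */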
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	xfs_scrub_agf_xref(sc);
out:
	return error;
}

/* AGFL */
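
/* In-memory context for walking the AGFL and checking its entries. */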
struct xfs_scrub_agfl_info {
	struct xfs_owner_info		oinfo;
	unsigned int			sz_entries;
	unsigned int			nr_entries;
	xfs_agblock_t			*entries;
	struct xfs_scrub_context	*sc;
};

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agfl_block_xref(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	struct xfs_owner_info		*oinfo)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);
}

/* Scrub an AGFL block. */
STATIC int
xfs_scrub_agfl_block(
	struct xfs_mount		*mp,
	xfs_agblock_t			agbno,
	void				*priv)
{
	struct xfs_scrub_agfl_info	*sai = priv;
	struct xfs_scrub_context	*sc = sai->sc;
	xfs_agnumber_t			agno = sc->sa.agno;

	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);

	xfs_scrub_agfl_block_xref(sc, agbno, priv);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return XFS_BTREE_QUERY_RANGE_ABORT;

	return 0;
}
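
/* Compare two AGFL block numbers; used to sort entries for the duplicate check. */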
static int
xfs_scrub_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agfl_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}

/* Scrub the AGFL. */
int
xfs_scrub_agfl(
	struct xfs_scrub_context	*sc)
{
	struct xfs_scrub_agfl_info	sai;
	struct xfs_agf			*agf;
	xfs_agnumber_t			agno;
	unsigned int			agflcount;
	unsigned int			i;
	int				error;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xfs_scrub_buffer_recheck(sc, sc->sa.agfl_bp);

	xfs_scrub_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sc = sc;
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
			KM_MAYFAIL);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
	error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
			sc->sa.agfl_bp, xfs_scrub_agfl_block, &sai);
	if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
		error = 0;
		goto out_free;
	}
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xfs_scrub_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}

/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xfs_scrub_agi_xref_icounts(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agi			*agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
	xfs_agino_t			icount;
	xfs_agino_t			freecount;
	int				error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agi_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_scrub_agi_xref_icounts(sc);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}

/* Scrub the AGI. */
int
xfs_scrub_agi(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_agi			*agi;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			eoag;
	xfs_agino_t			agino;
	xfs_agino_t			first_agino;
	xfs_agino_t			last_agino;
	xfs_agino_t			icount;
	int				i;
	int				level;
	int				error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xfs_scrub_buffer_recheck(sc, sc->sa.agi_bp);

	agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_ialloc_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (agino == NULLAGINO)
			continue;
		if (!xfs_verify_agino(mp, agno, agino))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	xfs_scrub_agi_xref(sc);
out:
	return error;
}