agheader.c

/*
 * Copyright (C) 2017 Oracle. All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "xfs_rmap.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
/*
 * Walk all the blocks in the AGFL.  The @fn callback can return any negative
 * error code or XFS_BTREE_QUERY_RANGE_ABORT.  (A minimal caller sketch
 * follows the function body.)
 */
int
xfs_scrub_walk_agfl(
	struct xfs_scrub_context	*sc,
	int				(*fn)(struct xfs_scrub_context *,
					      xfs_agblock_t bno, void *),
	void				*priv)
{
	struct xfs_agf			*agf;
	__be32				*agfl_bno;
	struct xfs_mount		*mp = sc->mp;
	unsigned int			flfirst;
	unsigned int			fllast;
	int				i;
	int				error;

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, sc->sa.agfl_bp);
	flfirst = be32_to_cpu(agf->agf_flfirst);
	fllast = be32_to_cpu(agf->agf_fllast);

	/* Nothing to walk in an empty AGFL. */
	if (agf->agf_flcount == cpu_to_be32(0))
		return 0;

	/* first to last is a consecutive list. */
	if (fllast >= flfirst) {
		for (i = flfirst; i <= fllast; i++) {
			error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
			if (error)
				return error;
			if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
				return error;
		}

		return 0;
	}

	/* Otherwise the list wraps: walk from flfirst to the end of the array... */
	for (i = flfirst; i < xfs_agfl_size(mp); i++) {
		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			return error;
	}

	/* ...then from the start of the array to fllast. */
	for (i = 0; i <= fllast; i++) {
		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			return error;
	}

	return 0;
}
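/*
 * Minimal caller sketch for xfs_scrub_walk_agfl(); the helper name
 * xfs_scrub_count_agfl_block and the "nr" counter below are hypothetical,
 * and the real callback used later in this file is xfs_scrub_agfl_block().
 * The walk stops early if the callback returns nonzero or the scrub
 * context has XFS_SCRUB_OFLAG_CORRUPT set.
 *
 *	static int
 *	xfs_scrub_count_agfl_block(
 *		struct xfs_scrub_context	*sc,
 *		xfs_agblock_t			agbno,
 *		void				*priv)
 *	{
 *		unsigned int			*nr = priv;
 *
 *		(*nr)++;
 *		return 0;
 *	}
 *
 *	unsigned int	nr = 0;
 *	error = xfs_scrub_walk_agfl(sc, xfs_scrub_count_agfl_block, &nr);
 */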
/* Superblock */

/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_superblock_xref(
	struct xfs_scrub_context	*sc,
	struct xfs_buf			*bp)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agnumber_t			agno = sc->sm->sm_agno;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_SB_BLOCK(mp);

	error = xfs_scrub_ag_init(sc, agno, &sc->sa);
	if (!xfs_scrub_xref_process_error(sc, agno, agbno, &error))
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}
/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xfs_scrub_superblock(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*bp;
	struct xfs_dsb			*sb;
	xfs_agnumber_t			agno;
	uint32_t			v2_ok;
	__be32				features_mask;
	int				error;
	__be16				vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
		  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
		  XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_sb_buf_ops);
	/*
	 * The superblock verifier can return several different error codes
	 * if it thinks the superblock doesn't look right.  For a mount these
	 * would all get bounced back to userspace, but if we're here then the
	 * fs mounted successfully, which means that this secondary superblock
	 * is simply incorrect.  Treat all these codes the same way we treat
	 * any corruption.
	 */
	switch (error) {
	case -EINVAL:	/* also -EWRONGFS */
	case -ENOSYS:
	case -EFBIG:
		error = -EFSCORRUPTED;
	default:
		break;
	}
	if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = XFS_BUF_TO_SBP(bp);

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xfs_scrub_block_set_preen(sc, bp);
	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xfs_scrub_block_set_preen(sc, bp);
	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xfs_scrub_block_set_preen(sc, bp);
	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xfs_scrub_block_set_preen(sc, bp);
	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xfs_scrub_block_set_corrupt(sc, bp);
	/* Check sb_versionnum bits that are set at mkfs time. */
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xfs_scrub_block_set_preen(sc, bp);
	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xfs_scrub_block_set_preen(sc, bp);
	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xfs_scrub_block_set_preen(sc, bp);
	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xfs_scrub_block_set_preen(sc, bp);
	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xfs_scrub_block_set_preen(sc, bp);
	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xfs_scrub_block_set_preen(sc, bp);
	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xfs_scrub_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xfs_scrub_block_set_corrupt(sc, bp);

		if (sb->sb_features2 != sb->sb_bad_features2)
			xfs_scrub_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xfs_scrub_block_set_corrupt(sc, bp);
	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xfs_scrub_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xfs_scrub_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xfs_scrub_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}
	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xfs_scrub_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xfs_scrub_block_set_corrupt(sc, bp);

	xfs_scrub_superblock_xref(sc, bp);

	return error;
}
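/*
 * Worked example of the mask split used in xfs_scrub_superblock() above,
 * with hypothetical values: suppose the primary superblock has the
 * NUMBITS, LOGV2BIT, and ATTRBIT version bits set, and this secondary
 * superblock is missing ATTRBIT.  ATTRBIT appears only in the
 * "settable after mkfs" mask, so the mismatch is flagged for preening
 * (the backup is merely stale).  Had LOGV2BIT differed instead, it falls
 * in the mkfs-time mask and the block would be marked corrupt.
 */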
/* AGF */

/* Tally freespace record lengths. */
STATIC int
xfs_scrub_agf_record_bno_lengths(
	struct xfs_btree_cur		*cur,
	struct xfs_alloc_rec_incore	*rec,
	void				*priv)
{
	xfs_extlen_t			*blocks = priv;

	(*blocks) += rec->ar_blockcount;
	return 0;
}
/* Check agf_freeblks */
static inline void
xfs_scrub_agf_xref_freeblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_extlen_t			blocks = 0;
	int				error;

	if (!sc->sa.bno_cur)
		return;

	error = xfs_alloc_query_all(sc->sa.bno_cur,
			xfs_scrub_agf_record_bno_lengths, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_freeblks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Cross reference the AGF with the cntbt (freespace by length btree) */
static inline void
xfs_scrub_agf_xref_cntbt(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t			agbno;
	xfs_extlen_t			blocks;
	int				have;
	int				error;

	if (!sc->sa.cnt_cur)
		return;

	/* Any freespace at all? */
	error = xfs_alloc_lookup_le(sc->sa.cnt_cur, 0, -1U, &have);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have) {
		if (agf->agf_freeblks != cpu_to_be32(0))
			xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
		return;
	}

	/* Check agf_longest */
	error = xfs_alloc_get_rec(sc->sa.cnt_cur, &agbno, &blocks, &have);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	if (!have || blocks != be32_to_cpu(agf->agf_longest))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Check the btree block counts in the AGF against the btrees. */
STATIC void
xfs_scrub_agf_xref_btreeblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			blocks;
	xfs_agblock_t			btreeblks;
	int				error;

	/* Check agf_rmap_blocks; set up for agf_btreeblks check */
	if (sc->sa.rmap_cur) {
		error = xfs_btree_count_blocks(sc->sa.rmap_cur, &blocks);
		if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.rmap_cur))
			return;
		btreeblks = blocks - 1;
		if (blocks != be32_to_cpu(agf->agf_rmap_blocks))
			xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
	} else {
		btreeblks = 0;
	}

	/*
	 * No rmap cursor; we can't xref if we have the rmapbt feature.
	 * We also can't do it if we're missing the free space btree cursors.
	 */
	if ((xfs_sb_version_hasrmapbt(&mp->m_sb) && !sc->sa.rmap_cur) ||
	    !sc->sa.bno_cur || !sc->sa.cnt_cur)
		return;

	/* Check agf_btreeblks */
	error = xfs_btree_count_blocks(sc->sa.bno_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;
	btreeblks += blocks - 1;

	error = xfs_btree_count_blocks(sc->sa.cnt_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.cnt_cur))
		return;
	btreeblks += blocks - 1;

	if (btreeblks != be32_to_cpu(agf->agf_btreeblks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Check agf_refcount_blocks against tree size */
static inline void
xfs_scrub_agf_xref_refcblks(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agf			*agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	xfs_agblock_t			blocks;
	int				error;

	if (!sc->sa.refc_cur)
		return;

	error = xfs_btree_count_blocks(sc->sa.refc_cur, &blocks);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.refc_cur))
		return;
	if (blocks != be32_to_cpu(agf->agf_refcount_blocks))
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agf_bp);
}
/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agf_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGF_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_agf_xref_freeblks(sc);
	xfs_scrub_agf_xref_cntbt(sc);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_agf_xref_btreeblks(sc);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);
	xfs_scrub_agf_xref_refcblks(sc);

	/* scrub teardown will take care of sc->sa for us */
}
/* Scrub the AGF. */
int
xfs_scrub_agf(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_agf			*agf;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			eoag;
	xfs_agblock_t			agfl_first;
	xfs_agblock_t			agfl_last;
	xfs_agblock_t			agfl_count;
	xfs_agblock_t			fl_count;
	int				level;
	int				error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;
	xfs_scrub_buffer_recheck(sc, sc->sa.agf_bp);

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
	}
	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = xfs_agfl_size(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
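	/*
	 * Worked example of the wrapped-count arithmetic above, using
	 * hypothetical values: if xfs_agfl_size(mp) == 118, agfl_first == 116,
	 * and agfl_last == 2, the free list wraps around the end of the
	 * array, so fl_count = 118 - 116 + 2 + 1 = 5 entries
	 * (slots 116, 117, 0, 1, 2), which must match agf_flcount.
	 */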
	xfs_scrub_agf_xref(sc);
out:
	return error;
}
/* AGFL */

struct xfs_scrub_agfl_info {
	struct xfs_owner_info		oinfo;
	unsigned int			sz_entries;
	unsigned int			nr_entries;
	xfs_agblock_t			*entries;
};
/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agfl_block_xref(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	struct xfs_owner_info		*oinfo)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);
}
/* Scrub an AGFL block. */
STATIC int
xfs_scrub_agfl_block(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	void				*priv)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_scrub_agfl_info	*sai = priv;
	xfs_agnumber_t			agno = sc->sa.agno;

	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);

	xfs_scrub_agfl_block_xref(sc, agbno, priv);

	return 0;
}
static int
xfs_scrub_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}
/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agfl_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGFL_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);

	/*
	 * Scrub teardown will take care of sc->sa for us.  Leave sc->sa
	 * active so that the agfl block xref can use it too.
	 */
}
/* Scrub the AGFL. */
int
xfs_scrub_agfl(
	struct xfs_scrub_context	*sc)
{
	struct xfs_scrub_agfl_info	sai;
	struct xfs_agf			*agf;
	xfs_agnumber_t			agno;
	unsigned int			agflcount;
	unsigned int			i;
	int				error;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;
	xfs_scrub_buffer_recheck(sc, sc->sa.agfl_bp);

	xfs_scrub_agfl_xref(sc);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > xfs_agfl_size(sc->mp)) {
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
	memset(&sai, 0, sizeof(sai));
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
	error = xfs_scrub_walk_agfl(sc, xfs_scrub_agfl_block, &sai);
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xfs_scrub_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}
/* AGI */

/* Check agi_count/agi_freecount */
static inline void
xfs_scrub_agi_xref_icounts(
	struct xfs_scrub_context	*sc)
{
	struct xfs_agi			*agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);
	xfs_agino_t			icount;
	xfs_agino_t			freecount;
	int				error;

	if (!sc->sa.ino_cur)
		return;

	error = xfs_ialloc_count_inodes(sc->sa.ino_cur, &icount, &freecount);
	if (!xfs_scrub_should_check_xref(sc, &error, &sc->sa.ino_cur))
		return;
	if (be32_to_cpu(agi->agi_count) != icount ||
	    be32_to_cpu(agi->agi_freecount) != freecount)
		xfs_scrub_block_xref_set_corrupt(sc, sc->sa.agi_bp);
}
/* Cross-reference with the other btrees. */
STATIC void
xfs_scrub_agi_xref(
	struct xfs_scrub_context	*sc)
{
	struct xfs_owner_info		oinfo;
	struct xfs_mount		*mp = sc->mp;
	xfs_agblock_t			agbno;
	int				error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agbno = XFS_AGI_BLOCK(mp);

	error = xfs_scrub_ag_btcur_init(sc, &sc->sa);
	if (error)
		return;

	xfs_scrub_xref_is_used_space(sc, agbno, 1);
	xfs_scrub_xref_is_not_inode_chunk(sc, agbno, 1);
	xfs_scrub_agi_xref_icounts(sc);
	xfs_rmap_ag_owner(&oinfo, XFS_RMAP_OWN_FS);
	xfs_scrub_xref_is_owned_by(sc, agbno, 1, &oinfo);
	xfs_scrub_xref_is_not_shared(sc, agbno, 1);

	/* scrub teardown will take care of sc->sa for us */
}
/* Scrub the AGI. */
int
xfs_scrub_agi(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_agi			*agi;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			eoag;
	xfs_agino_t			agino;
	xfs_agino_t			first_agino;
	xfs_agino_t			last_agino;
	xfs_agino_t			icount;
	int				i;
	int				level;
	int				error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;
	xfs_scrub_buffer_recheck(sc, sc->sa.agi_bp);

	agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
	xfs_ialloc_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (agino == NULLAGINO)
			continue;
		if (!xfs_verify_agino(mp, agno, agino))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	xfs_scrub_agi_xref(sc);
out:
	return error;
}