@@ -38,68 +38,6 @@
 #include "scrub/common.h"
 #include "scrub/trace.h"
 
-/*
- * Walk all the blocks in the AGFL. The fn function can return any negative
- * error code or XFS_BTREE_QUERY_RANGE_ABORT.
- */
-int
-xfs_scrub_walk_agfl(
-	struct xfs_scrub_context	*sc,
-	int				(*fn)(struct xfs_scrub_context *,
-					      xfs_agblock_t bno, void *),
-	void				*priv)
-{
-	struct xfs_agf			*agf;
-	__be32				*agfl_bno;
-	struct xfs_mount		*mp = sc->mp;
-	unsigned int			flfirst;
-	unsigned int			fllast;
-	int				i;
-	int				error;
-
-	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
-	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, sc->sa.agfl_bp);
-	flfirst = be32_to_cpu(agf->agf_flfirst);
-	fllast = be32_to_cpu(agf->agf_fllast);
-
-	/* Nothing to walk in an empty AGFL. */
-	if (agf->agf_flcount == cpu_to_be32(0))
-		return 0;
-
-	/* first to last is a consecutive list. */
-	if (fllast >= flfirst) {
-		for (i = flfirst; i <= fllast; i++) {
-			error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
-			if (error)
-				return error;
-			if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
-				return error;
-		}
-
-		return 0;
-	}
-
-	/* first to the end */
-	for (i = flfirst; i < xfs_agfl_size(mp); i++) {
-		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
-		if (error)
-			return error;
-		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
-			return error;
-	}
-
-	/* the start to last. */
-	for (i = 0; i <= fllast; i++) {
-		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
-		if (error)
-			return error;
-		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
-			return error;
-	}
-
-	return 0;
-}
-
 /* Superblock */
 
 /* Cross-reference with the other btrees. */
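The helper removed above open-codes the AGFL's wrap-around traversal: the free list is a ring inside the AGFL block, so when agf_fllast sits at or after agf_flfirst the walk is a single pass, and otherwise it runs from agf_flfirst to the end of the AGFL and then from slot 0 up to agf_fllast. That same traversal presumably now lives in the shared xfs_agfl_walk helper that the final hunk calls. As a quick illustration of the ring walk only (a standalone userspace model, not kernel code; walk_ring, print_bno, and the sample values are invented for this sketch):

#include <stdio.h>

/* Model of the AGFL ring walk deleted above: slots form a circular list
 * of agfl_size entries, delimited by flfirst/fllast, with flcount live. */
static int
walk_ring(const unsigned int *slots, unsigned int agfl_size,
	  unsigned int flfirst, unsigned int fllast, unsigned int flcount,
	  int (*fn)(unsigned int bno, void *priv), void *priv)
{
	unsigned int	i;
	int		error;

	/* Nothing to walk in an empty list. */
	if (flcount == 0)
		return 0;

	/* Contiguous case: one pass from first to last. */
	if (fllast >= flfirst) {
		for (i = flfirst; i <= fllast; i++) {
			error = fn(slots[i], priv);
			if (error)
				return error;
		}
		return 0;
	}

	/* Wrapped case: first to the end of the AGFL... */
	for (i = flfirst; i < agfl_size; i++) {
		error = fn(slots[i], priv);
		if (error)
			return error;
	}

	/* ...then slot 0 through last. */
	for (i = 0; i <= fllast; i++) {
		error = fn(slots[i], priv);
		if (error)
			return error;
	}
	return 0;
}

static int
print_bno(unsigned int bno, void *priv)
{
	(void)priv;	/* unused in this sketch */
	printf("AGFL block %u\n", bno);
	return 0;
}

int
main(void)
{
	/* A wrapped free list: flfirst=4, fllast=1 in a 6-slot AGFL. */
	unsigned int slots[6] = { 70, 71, 0, 0, 68, 69 };

	return walk_ring(slots, 6, 4, 1, 4, print_bno, NULL);
}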
@@ -678,6 +616,7 @@ struct xfs_scrub_agfl_info {
 	unsigned int		sz_entries;
 	unsigned int		nr_entries;
 	xfs_agblock_t		*entries;
+	struct xfs_scrub_context	*sc;
 };
 
 /* Cross-reference with the other btrees. */
@@ -699,12 +638,12 @@ xfs_scrub_agfl_block_xref(
 /* Scrub an AGFL block. */
 STATIC int
 xfs_scrub_agfl_block(
-	struct xfs_scrub_context	*sc,
+	struct xfs_mount		*mp,
 	xfs_agblock_t			agbno,
 	void				*priv)
 {
-	struct xfs_mount		*mp = sc->mp;
 	struct xfs_scrub_agfl_info	*sai = priv;
+	struct xfs_scrub_context	*sc = sai->sc;
 	xfs_agnumber_t			agno = sc->sa.agno;
 
 	if (xfs_verify_agbno(mp, agno, agbno) &&
@@ -715,6 +654,9 @@ xfs_scrub_agfl_block(
 
 	xfs_scrub_agfl_block_xref(sc, agbno, priv);
 
+	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
+		return XFS_BTREE_QUERY_RANGE_ABORT;
+
 	return 0;
 }
 
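With the hunks above, the per-block callback no longer receives the scrub context directly: the shared walker hands it only the mount, the AGFL block number, and the opaque priv pointer. That is why xfs_scrub_agfl_info grows an sc back-pointer and why a corruption flag now has to be turned into XFS_BTREE_QUERY_RANGE_ABORT to stop the walk early. The callback contract inferred from the new xfs_scrub_agfl_block() signature looks roughly like this (the typedef name is a guess for illustration, not quoted from libxfs):

/* Assumed per-block callback type for the shared AGFL walker, inferred
 * from this patch; the typedef name itself is illustrative. */
typedef int (*xfs_agfl_walk_fn)(struct xfs_mount *mp, xfs_agblock_t bno,
				void *priv);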
@@ -794,6 +736,7 @@ xfs_scrub_agfl(
 		goto out;
 	}
 	memset(&sai, 0, sizeof(sai));
+	sai.sc = sc;
 	sai.sz_entries = agflcount;
 	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount,
 			KM_MAYFAIL);
@@ -804,7 +747,12 @@ xfs_scrub_agfl(
 
 	/* Check the blocks in the AGFL. */
 	xfs_rmap_ag_owner(&sai.oinfo, XFS_RMAP_OWN_AG);
-	error = xfs_scrub_walk_agfl(sc, xfs_scrub_agfl_block, &sai);
+	error = xfs_agfl_walk(sc->mp, XFS_BUF_TO_AGF(sc->sa.agf_bp),
+			sc->sa.agfl_bp, xfs_scrub_agfl_block, &sai);
+	if (error == XFS_BTREE_QUERY_RANGE_ABORT) {
+		error = 0;
+		goto out_free;
+	}
 	if (error)
 		goto out_free;
 
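For reference, the call site above implies a walker exported from shared libxfs code with roughly the following shape (a sketch inferred from the arguments passed here, not copied from the helper's actual declaration): it takes the mount, the AGF, the AGFL buffer, the callback, and the priv cookie, and it forwards the callback's return value, which is how XFS_BTREE_QUERY_RANGE_ABORT propagates back and is translated into "stop scrubbing, no error" by the hunk above.

/* Sketch of the assumed xfs_agfl_walk() declaration, inferred from the
 * call site in this patch rather than quoted from libxfs. */
int xfs_agfl_walk(struct xfs_mount *mp, struct xfs_agf *agf,
		  struct xfs_buf *agflbp,
		  int (*walk_fn)(struct xfs_mount *mp, xfs_agblock_t bno,
				 void *priv),
		  void *priv);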